From 0bb5519856843eb961cdb6cff1d835456c0c29c2 Mon Sep 17 00:00:00 2001
From: Arthur Zucker
Date: Thu, 19 Dec 2024 10:33:27 +0100
Subject: [PATCH 01/86] Merge

Co-authored-by: Isotr0py <41363108+Isotr0py@users.noreply.github.com>
---
 .../benchmark-pipeline.yaml | 11 +-
 .../scripts/wait-for-image.sh | 4 +-
 .buildkite/release-pipeline.yaml | 33 +-
 .buildkite/run-gh200-test.sh | 25 +
 .buildkite/run-xpu-test.sh | 7 +-
 .buildkite/test-pipeline.yaml | 73 +-
 .github/workflows/lint-and-deploy.yaml | 81 ++
 CMakeLists.txt | 5 +-
 Dockerfile | 40 +-
 Dockerfile.neuron | 3 +-
 README.md | 5 +
 benchmarks/backend_request_func.py | 6 +
 benchmarks/benchmark_guided.py | 494 +++++++
 benchmarks/benchmark_serving.py | 77 ++
 benchmarks/benchmark_serving_guided.py | 881 +++++++++++++
 benchmarks/benchmark_throughput.py | 47 +-
 .../disagg_overhead_benchmark.sh | 144 +++
 .../disagg_performance_benchmark.sh | 164 +++
 .../disagg_prefill_proxy_server.py | 61 +
 .../disagg_benchmarks/round_robin_proxy.py | 60 +
 .../visualize_benchmark_results.py | 46 +
 .../fused_kernels/layernorm_rms_benchmarks.py | 173 +++
 benchmarks/kernels/benchmark_rmsnorm.py | 262 ++++
 .../structured_schema_1.json | 113 ++
 csrc/attention/paged_attention_v1.cu | 11 +-
 csrc/attention/paged_attention_v2.cu | 11 +-
 csrc/cache_kernels.cu | 14 +-
 csrc/dispatch_utils.h | 14 +
 csrc/mamba/causal_conv1d/causal_conv1d.cu | 2 +-
 csrc/ops.h | 8 +
 csrc/quantization/fp8/common.cuh | 26 +-
 ...fused_layernorm_dynamic_per_token_quant.cu | 160 +++
 .../fused_kernels/layernorm_utils.cuh | 327 +++++
 .../fused_kernels/quant_conversions.cuh | 81 ++
 csrc/quantization/vectorization.cuh | 33 +
 csrc/torch_bindings.cpp | 8 +
 docs/requirements-docs.txt | 6 +-
 .../usage/disagg_prefill/abstraction.jpg | Bin 0 -> 104673 bytes
 .../assets/usage/disagg_prefill/overview.jpg | Bin 0 -> 177439 bytes
 .../automatic_prefix_caching/details.md | 2 +-
 docs/source/conf.py | 67 +
 docs/source/design/arch_overview.rst | 2 +-
 .../design/multimodal/multimodal_index.rst | 5 +-
 docs/source/design/multiprocessing.md | 195 +++
 docs/source/getting_started/debugging.rst | 56 +
 .../getting_started/gaudi-installation.rst | 36 +-
 docs/source/getting_started/installation.rst | 43 +-
 docs/source/index.rst | 31 +-
 .../models/enabling_multimodal_inputs.rst | 2 +-
 docs/source/models/generative_models.rst | 146 +++
 docs/source/models/pooling_models.rst | 136 ++
 docs/source/models/supported_models.rst | 253 +++-
 docs/source/quantization/bnb.rst | 2 +-
 docs/source/quantization/fp8.rst | 2 +-
 docs/source/quantization/int8.rst | 4 +-
 .../serving/architecture_helm_deployment.png | Bin 0 -> 991484 bytes
 docs/source/serving/deploying_with_docker.rst | 23 +
 docs/source/serving/deploying_with_helm.rst | 253 ++++
 docs/source/serving/deploying_with_k8s.rst | 4 +-
 docs/source/serving/deploying_with_kubeai.rst | 17 +
 docs/source/serving/distributed_serving.rst | 2 +-
 docs/source/serving/integrations.rst | 1 +
 .../serving/openai_compatible_server.md | 669 ++++------
 .../serving/serving_with_llamastack.rst | 2 +-
 .../compatibility_matrix.rst | 12 +-
 docs/source/usage/disagg_prefill.rst | 69 +
 docs/source/{models => usage}/engine_args.rst | 0
 docs/source/{serving => usage}/env_vars.rst | 0
 docs/source/{serving => usage}/faq.rst | 9 +-
 docs/source/{models => usage}/lora.rst | 4 +-
 .../vlm.rst => usage/multimodal_inputs.rst} | 342 +++--
 docs/source/{models => usage}/performance.rst | 0
 docs/source/{models => usage}/spec_decode.rst | 11 +-
 .../{models => usage}/structured_outputs.rst | 0
 docs/source/usage/tool_calling.md | 287 +++++
 docs/source/{serving => usage}/usage_stats.md | 0
 examples/chart-helm/.helmignore | 6 +
 examples/chart-helm/Chart.yaml | 21 +
 examples/chart-helm/ct.yaml | 3 +
 examples/chart-helm/lintconf.yaml | 42 +
 examples/chart-helm/templates/_helpers.tpl | 164 +++
 examples/chart-helm/templates/configmap.yaml | 11 +
 .../chart-helm/templates/custom-objects.yaml | 6 +
 examples/chart-helm/templates/deployment.yaml | 122 ++
 examples/chart-helm/templates/hpa.yaml | 31 +
 examples/chart-helm/templates/job.yaml | 37 +
 .../templates/poddisruptionbudget.yaml | 7 +
 examples/chart-helm/templates/pvc.yaml | 13 +
 examples/chart-helm/templates/secrets.yaml | 10 +
 examples/chart-helm/templates/service.yaml | 14 +
 examples/chart-helm/values.schema.json | 265 ++++
 examples/chart-helm/values.yaml | 119 ++
 examples/disaggregated_prefill.sh | 109 ++
 examples/offline_inference_audio_language.py | 10 +-
 examples/offline_inference_chat.py | 72 +-
 examples/offline_inference_classification.py | 28 +
 examples/offline_inference_embedding.py | 19 +-
 examples/offline_inference_openai.md | 92 +-
 examples/offline_inference_scoring.py | 23 +
 examples/offline_inference_vision_language.py | 554 ++++----
 ...ine_inference_vision_language_embedding.py | 6 +-
 ...e_inference_vision_language_multi_image.py | 290 ++---
 examples/offline_inference_with_profiler.py | 31 +-
 examples/offline_profile.py | 236 +++-
 ...i_chat_completion_client_for_multimodal.py | 34 +-
 ...ai_chat_embedding_client_for_multimodal.py | 2 +-
 examples/openai_cross_encoder_score.py | 35 +-
 .../tool_chat_template_llama3.2_json.jinja | 12 +-
 pyproject.toml | 3 +-
 python_only_dev.py | 96 +-
 requirements-build.txt | 2 +-
 requirements-common.txt | 13 +-
 requirements-cpu.txt | 3 +-
 requirements-cuda-arm64.txt | 3 +
 requirements-cuda.txt | 4 +-
 requirements-hpu.txt | 2 +-
 requirements-rocm.txt | 3 +-
 requirements-test.in | 9 +-
 requirements-test.txt | 35 +-
 requirements-tpu.txt | 10 +-
 setup.py | 96 +-
 .../test_basic_correctness.py | 16 +
 tests/compile/piecewise/test_simple.py | 9 +-
 tests/compile/piecewise/test_toy_llama.py | 33 +-
 tests/compile/test_basic_correctness.py | 4 +-
 tests/compile/test_functionalization.py | 21 +-
 tests/compile/test_fusion.py | 61 +-
 tests/conftest.py | 36 +-
 tests/core/block/test_prefix_caching_block.py | 65 +-
 tests/core/test_scheduler_encoder_decoder.py | 2 +-
 tests/core/utils.py | 10 +
 tests/distributed/test_pipeline_parallel.py | 24 +-
 tests/distributed/test_pynccl.py | 70 +-
 tests/distributed/test_same_node.py | 29 +-
 tests/distributed/test_shm_broadcast.py | 84 +-
 tests/engine/test_arg_utils.py | 27 +-
 tests/entrypoints/conftest.py | 70 +
 tests/entrypoints/llm/test_encode.py | 6 +-
 tests/entrypoints/llm/test_gpu_utilization.py | 25 +
 tests/entrypoints/llm/test_guided_generate.py | 83 ++
 tests/entrypoints/llm/test_lazy_outlines.py | 24 +-
 .../openai/test_async_tokenization.py | 137 ++
 tests/entrypoints/openai/test_audio.py | 126 +-
 tests/entrypoints/openai/test_basic.py | 51 +
 tests/entrypoints/openai/test_chat.py | 2 +
 tests/entrypoints/openai/test_score.py | 16 +-
 tests/entrypoints/openai/test_serving_chat.py | 1 +
 .../entrypoints/openai/test_serving_engine.py | 11 +
 tests/entrypoints/openai/test_vision.py | 4 +-
 .../openai/test_vision_embedding.py | 6 +-
 tests/kernels/test_causal_conv1d.py | 39 +-
 tests/kernels/test_flash_attn.py | 20 +-
 tests/kernels/test_fused_quant_layernorm.py | 171 +++
 tests/kv_transfer/disagg_test.py | 119 ++
 tests/kv_transfer/module_test.py | 64 +
 tests/kv_transfer/test_lookup_buffer.py | 160 +++
 tests/kv_transfer/test_lookup_buffer.sh | 3 +
 tests/kv_transfer/test_send_recv.py | 155 +++
 tests/kv_transfer/test_send_recv.sh | 3 +
 tests/lora/test_chatglm3_tp.py | 9 +-
 tests/lora/test_gemma.py | 3 +-
 tests/lora/test_layers.py | 91 +-
 tests/lora/test_llama_tp.py | 64 +-
 tests/lora/test_long_context.py | 3 +-
 tests/lora/test_lora_manager.py | 58 +-
 tests/lora/test_minicpmv.py | 3 +-
 tests/lora/test_minicpmv_tp.py | 2 +
 tests/lora/test_mixtral.py | 1 +
 tests/lora/test_phi.py | 3 +-
 tests/lora/test_quant_model.py | 9 +-
 tests/lora/test_tokenizer_group.py | 20 +
 tests/metrics/test_metrics.py | 2 +-
 .../model_executor/test_guided_processors.py | 3 +-
 .../audio_language/test_ultravox.py | 5 +-
 .../decoder_only/language/test_jamba.py | 44 +-
 .../decoder_only/language/test_mamba.py | 42 +-
 .../mm_processor_kwargs/test_idefics3.py | 9 -
 .../mm_processor_kwargs/test_phi3v.py | 136 +-
 .../vision_language/test_models.py | 174 ++-
 .../vision_language/test_pixtral.py | 2 +-
 .../vision_language/vlm_utils/core.py | 31 +-
 .../vision_language/vlm_utils/model_utils.py | 48 +-
 .../vision_language/vlm_utils/types.py | 26 +-
 .../embedding/language/test_embedding.py | 7 +-
 .../models/embedding/language/test_gritlm.py | 200 +++
 .../models/embedding/language/test_scoring.py | 22 +-
 .../vision_language/test_dse_qwen2_vl.py | 2 +-
 .../vision_language/test_llava_next.py | 4 +-
 .../embedding/vision_language/test_phi3v.py | 2 +-
 tests/models/registry.py | 5 +
 tests/models/test_initialization.py | 7 +-
 tests/models/test_oot_registration.py | 5 +-
 tests/models/test_registry.py | 33 +-
 tests/multimodal/test_mapper.py | 49 +-
 tests/multimodal/test_processing.py | 297 +++--
 tests/multimodal/test_processor_kwargs.py | 169 +--
 .../my_gemma_embedding.py | 45 +-
 .../vllm_add_dummy_model/my_llava.py | 10 +-
 tests/standalone_tests/lazy_torch_compile.py | 28 +
 tests/standalone_tests/python_only_compile.sh | 30 +
 tests/test_config.py | 20 +-
 tests/test_lazy_torch_compile.py | 16 -
 tests/test_utils.py | 50 +-
 tests/utils.py | 48 +-
 tests/v1/core/test_prefix_caching.py | 124 +-
 tests/v1/engine/test_engine_args.py | 28 +-
 tests/v1/engine/test_engine_core.py | 4 +-
 tests/v1/engine/test_engine_core_client.py | 4 +-
 .../vllm_test_utils/vllm_test_utils/blame.py | 10 +-
 .../test_encoder_decoder_model_runner.py | 4 +-
 tests/worker/test_model_input.py | 4 +-
 tests/worker/test_model_runner.py | 5 +-
 tests/worker/test_profile.py | 18 +-
 tools/mypy.sh | 1 +
 tools/profiler/print_layerwise_table.py | 9 +-
 tools/profiler/visualize_layerwise_profile.py | 92 +-
 vllm/__init__.py | 13 +-
 vllm/_custom_ops.py | 20 +
 vllm/attention/backends/abstract.py | 1 +
 vllm/attention/backends/blocksparse_attn.py | 2 +
 vllm/attention/backends/flash_attn.py | 55 +-
 vllm/attention/backends/flashinfer.py | 4 +
 vllm/attention/backends/hpu_attn.py | 12 +
 vllm/attention/backends/ipex_attn.py | 1 +
 vllm/attention/backends/pallas.py | 1 +
 vllm/attention/backends/placeholder_attn.py | 66 +-
 vllm/attention/backends/rocm_flash_attn.py | 3 +-
 vllm/attention/backends/torch_sdpa.py | 24 +-
 vllm/attention/backends/xformers.py | 9 +-
 vllm/attention/layer.py | 139 +-
 vllm/compilation/backends.py | 348 ++++-
 vllm/compilation/compile_context.py | 23 -
 vllm/compilation/decorators.py | 43 +-
 vllm/compilation/fix_functionalization.py | 9 +-
 vllm/compilation/fusion.py | 719 ++++++---
 vllm/compilation/fx_utils.py | 42 +
 vllm/compilation/monitor.py | 36 +
 vllm/compilation/multi_output_match.py | 105 ++
 vllm/compilation/reshapes.py | 3 +-
 vllm/compilation/vllm_inductor_pass.py | 4 -
 vllm/compilation/wrapper.py | 4 +-
 vllm/config.py | 1138 +++++++++++++---
 vllm/core/block/block_table.py | 46 +-
 vllm/core/block/common.py | 19 +-
 vllm/core/block/cpu_gpu_block_allocator.py | 43 +-
 vllm/core/block/interfaces.py | 32 +-
 vllm/core/block/naive_block.py | 10 +-
 vllm/core/block/prefix_caching_block.py | 55 +-
 vllm/core/block_manager.py | 8 +-
 vllm/core/evictor.py | 63 +-
 vllm/core/placeholder_block_space_manager.py | 2 +-
 vllm/core/scheduler.py | 17 +-
 .../device_communicators/pynccl.py | 19 +
 .../device_communicators/pynccl_wrapper.py | 16 +
 .../device_communicators/shm_broadcast.py | 113 +-
 vllm/distributed/kv_transfer/README.md | 30 +
 vllm/distributed/kv_transfer/__init__.py | 0
 .../kv_transfer/disagg_prefill_workflow.jpg | Bin 0 -> 142656 bytes
 .../kv_transfer/kv_connector/__init__.py | 0
 .../kv_transfer/kv_connector/base.py | 122 ++
 .../kv_transfer/kv_connector/factory.py | 20 +
 .../kv_connector/simple_connector.py | 312 +++++
 .../kv_transfer/kv_lookup_buffer/__init__.py | 0
 .../kv_transfer/kv_lookup_buffer/base.py | 108 ++
 .../kv_lookup_buffer/simple_buffer.py | 242 ++++
 .../kv_transfer/kv_pipe/__init__.py | 0
 vllm/distributed/kv_transfer/kv_pipe/base.py | 65 +
 .../kv_transfer/kv_pipe/mooncake_pipe.py | 272 ++++
 .../kv_transfer/kv_pipe/pynccl_pipe.py | 276 ++++
 .../kv_transfer/kv_transfer_agent.py | 75 ++
 vllm/distributed/parallel_state.py | 99 +-
 vllm/engine/arg_utils.py | 159 ++-
 vllm/engine/async_llm_engine.py | 103 +-
 vllm/engine/llm_engine.py | 128 +-
 vllm/engine/metrics.py | 33 +-
 vllm/engine/metrics_types.py | 3 +-
 vllm/engine/multiprocessing/__init__.py | 11 +-
 vllm/engine/multiprocessing/client.py | 47 +-
 vllm/engine/output_processor/multi_step.py | 2 +-
 vllm/engine/protocol.py | 7 +-
 vllm/entrypoints/api_server.py | 11 +-
 vllm/entrypoints/chat_utils.py | 67 +-
 vllm/entrypoints/llm.py | 285 +++--
 vllm/entrypoints/openai/api_server.py | 48 +-
 vllm/entrypoints/openai/logits_processors.py | 4 +-
 vllm/entrypoints/openai/protocol.py | 90 +-
 vllm/entrypoints/openai/run_batch.py | 4 +-
 vllm/entrypoints/openai/serving_chat.py | 41 +-
 vllm/entrypoints/openai/serving_completion.py | 20 +-
 vllm/entrypoints/openai/serving_embedding.py | 39 +-
 vllm/entrypoints/openai/serving_engine.py | 103 +-
 vllm/entrypoints/openai/serving_score.py | 39 +-
 .../openai/serving_tokenization.py | 24 +-
 .../tool_parsers/granite_tool_parser.py | 10 +-
 .../openai/tool_parsers/hermes_tool_parser.py | 51 +-
 .../tool_parsers/mistral_tool_parser.py | 23 +-
 vllm/entrypoints/utils.py | 57 +
 vllm/envs.py | 27 +-
 vllm/executor/cpu_executor.py | 8 +-
 vllm/executor/multiproc_gpu_executor.py | 50 +-
 vllm/executor/multiproc_worker_utils.py | 50 +
 vllm/executor/neuron_executor.py | 6 +-
 vllm/executor/openvino_executor.py | 3 +-
 vllm/executor/ray_gpu_executor.py | 51 +-
 vllm/executor/ray_hpu_executor.py | 35 +-
 vllm/executor/ray_tpu_executor.py | 27 +-
 vllm/executor/ray_utils.py | 10 +-
 vllm/executor/ray_xpu_executor.py | 17 +-
 vllm/forward_context.py | 55 +-
 vllm/inputs/__init__.py | 31 -
 vllm/inputs/data.py | 75 +-
 vllm/inputs/registry.py | 130 +-
 vllm/lora/fully_sharded_layers.py | 199 ++-
 vllm/lora/layers.py | 875 ++++---------
 vllm/lora/lora.py | 18 +
 vllm/lora/models.py | 58 +-
 vllm/lora/peft_helper.py | 70 +
 vllm/lora/punica.py | 611 ---------
 vllm/lora/punica_wrapper/__init__.py | 7 +
 vllm/lora/punica_wrapper/punica_base.py | 482 +++++++
 vllm/lora/punica_wrapper/punica_gpu.py | 358 ++++++
 vllm/lora/punica_wrapper/punica_hpu.py | 87 ++
 vllm/lora/punica_wrapper/punica_selector.py | 19 +
 vllm/lora/punica_wrapper/utils.py | 159 +++
 .../guided_decoding/__init__.py | 115 +-
 .../guided_decoding/outlines_decoding.py | 11 +-
 .../outlines_logits_processors.py | 4 +-
 .../guided_decoding/xgrammar_decoding.py | 278 ++++
 .../guided_decoding/xgrammar_utils.py | 158 +++
 vllm/model_executor/layers/fused_moe/layer.py | 10 +-
 vllm/model_executor/layers/layernorm.py | 11 +-
 vllm/model_executor/layers/linear.py | 24 +-
 .../model_executor/layers/logits_processor.py | 5 +-
 .../layers/mamba/mamba_mixer.py | 26 +-
 vllm/model_executor/layers/pooler.py | 290 +++--
 .../layers/quantization/bitsandbytes.py | 8 +-
 .../layers/quantization/gguf.py | 69 +-
 vllm/model_executor/model_loader/loader.py | 92 +-
 vllm/model_executor/model_loader/utils.py | 11 +-
 vllm/model_executor/models/__init__.py | 11 +-
 vllm/model_executor/models/adapters.py | 98 ++
 vllm/model_executor/models/aria.py | 5 +-
 vllm/model_executor/models/bert.py | 2 +
 vllm/model_executor/models/blip.py | 45 +-
 vllm/model_executor/models/blip2.py | 5 +-
 vllm/model_executor/models/clip.py | 46 +-
 vllm/model_executor/models/commandr.py | 19 +-
 vllm/model_executor/models/exaone.py | 3 +-
 vllm/model_executor/models/gemma2.py | 56 +-
 vllm/model_executor/models/glm.py | 21 +
 .../models/glm4_vision_encoder.py | 22 +-
 vllm/model_executor/models/granite.py | 5 +-
 vllm/model_executor/models/gritlm.py | 248 ++++
 .../models/idefics2_vision_model.py | 25 +-
 vllm/model_executor/models/idefics3.py | 113 +-
 vllm/model_executor/models/interfaces.py | 46 +-
 vllm/model_executor/models/interfaces_base.py | 15 +-
 vllm/model_executor/models/intern_vit.py | 28 +-
 vllm/model_executor/models/internlm2.py | 22 +-
 vllm/model_executor/models/internvl.py | 95 +-
 vllm/model_executor/models/jamba.py | 113 +-
 vllm/model_executor/models/llama.py | 117 +-
 vllm/model_executor/models/llava.py | 318 ++---
 vllm/model_executor/models/llava_next.py | 26 +-
 .../model_executor/models/llava_next_video.py | 5 +-
 vllm/model_executor/models/llava_onevision.py | 5 +-
 vllm/model_executor/models/mamba.py | 92 +-
 vllm/model_executor/models/minicpm.py | 163 +--
 vllm/model_executor/models/minicpm3.py | 11 +-
 vllm/model_executor/models/minicpmv.py | 136 +-
 vllm/model_executor/models/molmo.py | 319 +++--
 vllm/model_executor/models/nemotron.py | 4 +-
 vllm/model_executor/models/paligemma.py | 16 +-
 vllm/model_executor/models/phi3v.py | 357 ++----
 vllm/model_executor/models/pixtral.py | 189 ++-
 vllm/model_executor/models/qwen2.py | 47 +-
 vllm/model_executor/models/qwen2_audio.py | 117 +-
 vllm/model_executor/models/qwen2_vl.py | 197 ++-
 vllm/model_executor/models/registry.py | 85 +-
 vllm/model_executor/models/siglip.py | 45 +-
 vllm/model_executor/models/solar.py | 3 +-
 vllm/model_executor/models/starcoder2.py | 7 +-
 vllm/model_executor/models/transformers.py | 225 ++++
 vllm/model_executor/models/ultravox.py | 247 ++--
 vllm/model_executor/models/utils.py | 87 +-
 vllm/model_executor/models/xverse.py | 423 ------
 vllm/model_executor/sampling_metadata.py | 1 +
 vllm/multimodal/__init__.py | 15 -
 vllm/multimodal/base.py | 76 +-
 vllm/multimodal/inputs.py | 6 +-
 vllm/multimodal/processing.py | 711 +++++-----
 vllm/multimodal/registry.py | 62 +-
 vllm/multimodal/utils.py | 10 +-
 vllm/outputs.py | 213 ++-
 vllm/platforms/__init__.py | 4 +-
 vllm/platforms/cpu.py | 17 +-
 vllm/platforms/cuda.py | 78 +-
 vllm/platforms/hpu.py | 22 +-
 vllm/platforms/interface.py | 67 +
 vllm/platforms/neuron.py | 23 +-
 vllm/platforms/openvino.py | 20 +-
 vllm/platforms/rocm.py | 33 +-
 vllm/platforms/tpu.py | 13 +-
 vllm/platforms/xpu.py | 16 +-
 vllm/plugins/__init__.py | 20 +-
 vllm/profiler/layerwise_profile.py | 22 +-
 vllm/sampling_params.py | 5 +-
 vllm/sequence.py | 57 +-
 vllm/spec_decode/multi_step_worker.py | 9 +-
 vllm/spec_decode/spec_decode_worker.py | 6 +-
 vllm/transformers_utils/config.py | 63 +-
 .../tokenizer_group/__init__.py | 9 +-
 .../tokenizer_group/tokenizer_group.py | 3 +-
 vllm/utils.py | 309 +++--
 vllm/v1/attention/backends/flash_attn.py | 160 +--
 vllm/v1/core/kv_cache_manager.py | 125 +-
 vllm/v1/core/kv_cache_utils.py | 134 +-
 vllm/v1/core/scheduler.py | 39 +-
 vllm/v1/engine/__init__.py | 29 +-
 vllm/v1/engine/async_llm.py | 54 +-
 vllm/v1/engine/async_stream.py | 8 +-
 vllm/v1/engine/core.py | 161 +--
 vllm/v1/engine/core_client.py | 88 +-
 vllm/v1/engine/detokenizer.py | 4 +-
 vllm/v1/engine/llm_engine.py | 64 +-
 vllm/v1/engine/mm_input_mapper.py | 161 ++-
 vllm/v1/engine/processor.py | 40 +-
 vllm/v1/executor/abstract.py | 40 +
 vllm/v1/executor/multiproc_executor.py | 387 ++++++
 .../{gpu_executor.py => uniproc_executor.py} | 15 +-
 vllm/v1/outputs.py | 6 +-
 vllm/v1/request.py | 36 +-
 vllm/v1/sample/sampler.py | 3 +-
 vllm/v1/utils.py | 82 +-
 vllm/v1/worker/gpu_input_batch.py | 295 +++++
 vllm/v1/worker/gpu_model_runner.py | 626 ++++-----
 vllm/v1/worker/gpu_worker.py | 19 +-
 vllm/worker/cache_engine.py | 12 +-
 ..._runner.py => cpu_pooling_model_runner.py} | 4 +-
 vllm/worker/cpu_worker.py | 8 +-
 vllm/worker/enc_dec_model_runner.py | 6 +-
 vllm/worker/hpu_model_runner.py | 21 +-
 vllm/worker/hpu_worker.py | 4 +-
 vllm/worker/model_runner.py | 191 +--
 vllm/worker/multi_step_model_runner.py | 9 +-
 vllm/worker/openvino_worker.py | 6 +-
 ...odel_runner.py => pooling_model_runner.py} | 6 +-
 vllm/worker/utils.py | 2 +-
 vllm/worker/worker.py | 89 +-
 vllm/worker/worker_base.py | 3 +-
 vllm/worker/xpu_model_runner.py | 4 -
 451 files changed, 23857 insertions(+), 9018 deletions(-)
 create mode 100644 .buildkite/run-gh200-test.sh
 create mode 100644 .github/workflows/lint-and-deploy.yaml
 create mode 100644 benchmarks/benchmark_guided.py
 create mode 100644 benchmarks/benchmark_serving_guided.py
 create mode 100644 benchmarks/disagg_benchmarks/disagg_overhead_benchmark.sh
 create mode 100644 benchmarks/disagg_benchmarks/disagg_performance_benchmark.sh
 create mode 100644 benchmarks/disagg_benchmarks/disagg_prefill_proxy_server.py
 create mode 100644 benchmarks/disagg_benchmarks/round_robin_proxy.py
 create mode 100644 benchmarks/disagg_benchmarks/visualize_benchmark_results.py
 create mode 100644 benchmarks/fused_kernels/layernorm_rms_benchmarks.py
 create mode 100644 benchmarks/kernels/benchmark_rmsnorm.py
 create mode 100644 benchmarks/structured_schemas/structured_schema_1.json
 create mode 100644 csrc/quantization/fused_kernels/fused_layernorm_dynamic_per_token_quant.cu
 create mode 100644 csrc/quantization/fused_kernels/layernorm_utils.cuh
 create mode 100644 csrc/quantization/fused_kernels/quant_conversions.cuh
 create mode 100644 csrc/quantization/vectorization.cuh
 create mode 100644 docs/source/assets/usage/disagg_prefill/abstraction.jpg
 create mode 100644 docs/source/assets/usage/disagg_prefill/overview.jpg
 create mode 100644 docs/source/design/multiprocessing.md
 create mode 100644 docs/source/models/generative_models.rst
 create mode 100644 docs/source/models/pooling_models.rst
 create mode 100644 docs/source/serving/architecture_helm_deployment.png
 create mode 100644 docs/source/serving/deploying_with_helm.rst
 create mode 100644 docs/source/serving/deploying_with_kubeai.rst
 rename docs/source/{serving => usage}/compatibility_matrix.rst (96%)
 create mode 100644 docs/source/usage/disagg_prefill.rst
 rename docs/source/{models => usage}/engine_args.rst (100%)
 rename docs/source/{serving => usage}/env_vars.rst (100%)
 rename docs/source/{serving => usage}/faq.rst (76%)
 rename docs/source/{models => usage}/lora.rst (99%)
 rename docs/source/{models/vlm.rst => usage/multimodal_inputs.rst} (52%)
 rename docs/source/{models => usage}/performance.rst (100%)
 rename docs/source/{models => usage}/spec_decode.rst (97%)
 rename docs/source/{models => usage}/structured_outputs.rst (100%)
 create mode 100644 docs/source/usage/tool_calling.md
 rename docs/source/{serving => usage}/usage_stats.md (100%)
 create mode 100644 examples/chart-helm/.helmignore
 create mode 100644 examples/chart-helm/Chart.yaml
 create mode 100644 examples/chart-helm/ct.yaml
 create mode 100644 examples/chart-helm/lintconf.yaml
 create mode 100644 examples/chart-helm/templates/_helpers.tpl
 create mode 100644 examples/chart-helm/templates/configmap.yaml
 create mode 100644 examples/chart-helm/templates/custom-objects.yaml
 create mode 100644 examples/chart-helm/templates/deployment.yaml
 create mode 100644 examples/chart-helm/templates/hpa.yaml
 create mode 100644 examples/chart-helm/templates/job.yaml
 create mode 100644 examples/chart-helm/templates/poddisruptionbudget.yaml
 create mode 100644 examples/chart-helm/templates/pvc.yaml
 create mode 100644 examples/chart-helm/templates/secrets.yaml
 create mode 100644 examples/chart-helm/templates/service.yaml
 create mode 100644 examples/chart-helm/values.schema.json
 create mode 100644 examples/chart-helm/values.yaml
 create mode 100644 examples/disaggregated_prefill.sh
 create mode 100644 examples/offline_inference_classification.py
 create mode 100644 examples/offline_inference_scoring.py
 create mode 100644 requirements-cuda-arm64.txt
 create mode 100644 tests/entrypoints/llm/test_gpu_utilization.py
 create mode 100644 tests/entrypoints/openai/test_async_tokenization.py
 create mode 100644 tests/kernels/test_fused_quant_layernorm.py
 create mode 100644 tests/kv_transfer/disagg_test.py
 create mode 100644 tests/kv_transfer/module_test.py
 create mode 100644 tests/kv_transfer/test_lookup_buffer.py
 create mode 100644 tests/kv_transfer/test_lookup_buffer.sh
 create mode 100644 tests/kv_transfer/test_send_recv.py
 create mode 100644 tests/kv_transfer/test_send_recv.sh
 create mode 100644 tests/models/embedding/language/test_gritlm.py
 create mode 100644 tests/standalone_tests/lazy_torch_compile.py
 create mode 100644 tests/standalone_tests/python_only_compile.sh
 delete mode 100644 tests/test_lazy_torch_compile.py
 delete mode 100644 vllm/compilation/compile_context.py
 create mode 100644 vllm/compilation/fx_utils.py
 create mode 100644 vllm/compilation/monitor.py
 create mode 100644 vllm/compilation/multi_output_match.py
 create mode 100644 vllm/distributed/kv_transfer/README.md
 create mode 100644 vllm/distributed/kv_transfer/__init__.py
 create mode 100644 vllm/distributed/kv_transfer/disagg_prefill_workflow.jpg
 create mode 100644 vllm/distributed/kv_transfer/kv_connector/__init__.py
 create mode 100644 vllm/distributed/kv_transfer/kv_connector/base.py
 create mode 100644 vllm/distributed/kv_transfer/kv_connector/factory.py
 create mode 100644 vllm/distributed/kv_transfer/kv_connector/simple_connector.py
 create mode 100644 vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py
 create mode 100644 vllm/distributed/kv_transfer/kv_lookup_buffer/base.py
 create mode 100644 vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py
 create mode 100644 vllm/distributed/kv_transfer/kv_pipe/__init__.py
 create mode 100644 vllm/distributed/kv_transfer/kv_pipe/base.py
 create mode 100644 vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py
 create mode 100644 vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py
 create mode 100644 vllm/distributed/kv_transfer/kv_transfer_agent.py
 create mode 100644 vllm/entrypoints/utils.py
 create mode 100644 vllm/lora/peft_helper.py
 delete mode 100644 vllm/lora/punica.py
 create mode 100644 vllm/lora/punica_wrapper/__init__.py
 create mode 100644 vllm/lora/punica_wrapper/punica_base.py
 create mode 100644 vllm/lora/punica_wrapper/punica_gpu.py
 create mode 100644 vllm/lora/punica_wrapper/punica_hpu.py
 create mode 100644 vllm/lora/punica_wrapper/punica_selector.py
 create mode 100644 vllm/lora/punica_wrapper/utils.py
 create mode 100644 vllm/model_executor/guided_decoding/xgrammar_decoding.py
 create mode 100644 vllm/model_executor/guided_decoding/xgrammar_utils.py
 create mode 100644 vllm/model_executor/models/adapters.py
 create mode 100644 vllm/model_executor/models/glm.py
 create mode 100644 vllm/model_executor/models/gritlm.py
 create mode 100644 vllm/model_executor/models/transformers.py
 delete mode 100644 vllm/model_executor/models/xverse.py
 create mode 100644 vllm/v1/executor/abstract.py
 create mode 100644 vllm/v1/executor/multiproc_executor.py
 rename vllm/v1/executor/{gpu_executor.py => uniproc_executor.py} (87%)
 create mode 100644 vllm/v1/worker/gpu_input_batch.py
 rename vllm/worker/{cpu_embedding_model_runner.py => cpu_pooling_model_runner.py} (98%)
 rename vllm/worker/{embedding_model_runner.py => pooling_model_runner.py} (98%)

diff --git a/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml b/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml
index 3db77d5f16022..64ba1b32fb074 100644
--- a/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml
+++ b/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml
@@ -21,7 +21,7 @@ steps:
         podSpec:
           priorityClassName: perf-benchmark
           containers:
-          - image: public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:$BUILDKITE_COMMIT
+          - image: public.ecr.aws/q9t5s3a7/vllm-ci-postmerge-repo:$BUILDKITE_COMMIT
             command:
             - bash .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
             resources:
@@ -51,7 +51,7 @@ steps:
       queue: H200
     plugins:
     - docker#v5.12.0:
-        image: public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:$BUILDKITE_COMMIT
+        image: public.ecr.aws/q9t5s3a7/vllm-ci-postmerge-repo:$BUILDKITE_COMMIT
         command:
         - bash
        - .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
@@ -65,13 +65,18 @@ steps:
       - VLLM_USAGE_SOURCE
       - HF_TOKEN
 
+  - block: "Run H100 Benchmark"
+    key: block-h100
+    depends_on: ~
+
   - label: "H100"
     # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing"
     agents:
       queue: H100
+    depends_on: block-h100
    plugins:
    - docker#v5.12.0:
-        image: public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:$BUILDKITE_COMMIT
+        image: public.ecr.aws/q9t5s3a7/vllm-ci-postmerge-repo:$BUILDKITE_COMMIT
         command:
         - bash
         - .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh

diff --git a/.buildkite/nightly-benchmarks/scripts/wait-for-image.sh b/.buildkite/nightly-benchmarks/scripts/wait-for-image.sh
index 19f7160e68a4d..aa0f7ade808e0 100644
--- a/.buildkite/nightly-benchmarks/scripts/wait-for-image.sh
+++ b/.buildkite/nightly-benchmarks/scripts/wait-for-image.sh
@@ -1,6 +1,6 @@
 #!/bin/sh
-TOKEN=$(curl -s -L "https://public.ecr.aws/token?service=public.ecr.aws&scope=repository:q9t5s3a7/vllm-ci-test-repo:pull" | jq -r .token)
-URL="https://public.ecr.aws/v2/q9t5s3a7/vllm-ci-test-repo/manifests/$BUILDKITE_COMMIT" +TOKEN=$(curl -s -L "https://public.ecr.aws/token?service=public.ecr.aws&scope=repository:q9t5s3a7/vllm-ci-postmerge-repo:pull" | jq -r .token) +URL="https://public.ecr.aws/v2/q9t5s3a7/vllm-ci-postmerge-repo/manifests/$BUILDKITE_COMMIT" TIMEOUT_SECONDS=10 diff --git a/.buildkite/release-pipeline.yaml b/.buildkite/release-pipeline.yaml index f78e360b7afd3..2de6fceb0c3fe 100644 --- a/.buildkite/release-pipeline.yaml +++ b/.buildkite/release-pipeline.yaml @@ -1,7 +1,7 @@ steps: - label: "Build wheel - CUDA 12.1" agents: - queue: cpu_queue + queue: cpu_queue_postmerge commands: - "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --build-arg CUDA_VERSION=12.1.0 --tag vllm-ci:build-image --target build --progress plain ." - "mkdir artifacts" @@ -18,7 +18,7 @@ steps: - label: "Build wheel - CUDA 11.8" # depends_on: block-build-cu118-wheel agents: - queue: cpu_queue + queue: cpu_queue_postmerge commands: - "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --build-arg CUDA_VERSION=11.8.0 --tag vllm-ci:build-image --target build --progress plain ." - "mkdir artifacts" @@ -26,3 +26,32 @@ steps: - "bash .buildkite/upload-wheels.sh" env: DOCKER_BUILDKIT: "1" + + - block: "Build release image" + depends_on: ~ + key: block-release-image-build + + - label: "Build release image" + depends_on: block-release-image-build + agents: + queue: cpu_queue_postmerge + commands: + - "aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7" + - "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --build-arg CUDA_VERSION=12.1.0 --tag public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT --target vllm-openai --progress plain ." + - "docker push public.ecr.aws/q9t5s3a7/vllm-release-repo:$BUILDKITE_COMMIT" + + - label: "Build and publish TPU release image" + depends_on: ~ + if: build.env("NIGHTLY") == "1" + agents: + queue: tpu_queue_postmerge + commands: + - "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --tag vllm/vllm-tpu:nightly --tag vllm/vllm-tpu:$BUILDKITE_COMMIT --progress plain -f Dockerfile.tpu ." + - "docker push vllm/vllm-tpu:nightly" + - "docker push vllm/vllm-tpu:$BUILDKITE_COMMIT" + plugins: + - docker-login#v3.0.0: + username: vllm + password-env: DOCKERHUB_TOKEN + env: + DOCKER_BUILDKIT: "1" diff --git a/.buildkite/run-gh200-test.sh b/.buildkite/run-gh200-test.sh new file mode 100644 index 0000000000000..d06604f96f2b8 --- /dev/null +++ b/.buildkite/run-gh200-test.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +# This script build the GH200 docker image and run the offline inference inside the container. +# It serves a sanity check for compilation and basic model usage. +set -ex + +# Try building the docker image +DOCKER_BUILDKIT=1 docker build . 
+  --target vllm-openai \
+  --platform "linux/arm64" \
+  -t gh200-test \
+  --build-arg max_jobs=66 \
+  --build-arg nvcc_threads=2 \
+  --build-arg torch_cuda_arch_list="9.0+PTX" \
+  --build-arg vllm_fa_cmake_gpu_arches="90-real"
+
+# Setup cleanup
+remove_docker_container() { docker rm -f gh200-test || true; }
+trap remove_docker_container EXIT
+remove_docker_container
+
+# Run the image and test offline inference
+docker run --name gh200-test --gpus=all --entrypoint="" gh200-test bash -c '
+    python3 examples/offline_inference.py
+'

diff --git a/.buildkite/run-xpu-test.sh b/.buildkite/run-xpu-test.sh
index faeac8e2ded36..e0a12afbe7320 100644
--- a/.buildkite/run-xpu-test.sh
+++ b/.buildkite/run-xpu-test.sh
@@ -12,5 +12,8 @@ remove_docker_container() { docker rm -f xpu-test || true; }
 trap remove_docker_container EXIT
 remove_docker_container
 
-# Run the image and launch offline inference
-docker run --network host --name xpu-test --device /dev/dri -v /dev/dri/by-path:/dev/dri/by-path --entrypoint="" xpu-test python3 examples/offline_inference.py
+# Run the image and test offline inference/tensor parallel
+docker run --name xpu-test --device /dev/dri -v /dev/dri/by-path:/dev/dri/by-path --entrypoint="" xpu-test sh -c '
+    python3 examples/offline_inference.py
+    python3 examples/offline_inference_cli.py -tp 2
+'

diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml
index fc23c9cff0d87..44f47fac1c1b3 100644
--- a/.buildkite/test-pipeline.yaml
+++ b/.buildkite/test-pipeline.yaml
@@ -50,9 +50,9 @@ steps:
   - tests/multimodal
   - tests/test_utils
   - tests/worker
-  - tests/test_lazy_torch_compile.py
+  - tests/standalone_tests/lazy_torch_compile.py
   commands:
-  - python3 test_lazy_torch_compile.py
+  - python3 standalone_tests/lazy_torch_compile.py
   - pytest -v -s mq_llm_engine # MQLLMEngine
   - pytest -v -s async_engine # AsyncLLMEngine
   - NUM_SCHEDULER_STEPS=4 pytest -v -s async_engine/test_async_llm_engine.py
@@ -61,6 +61,13 @@ steps:
   - pytest -v -s test_utils.py # Utils
   - pytest -v -s worker # Worker
 
+- label: Python-only Installation Test
+  source_file_dependencies:
+  - tests/standalone_tests/python_only_compile.sh
+  - setup.py
+  commands:
+  - bash standalone_tests/python_only_compile.sh
+
 - label: Basic Correctness Test # 30min
   #mirror_hardwares: [amd]
   fast_check: true
@@ -174,14 +181,14 @@ steps:
   commands:
     - VLLM_USE_V1=1 pytest -v -s v1
 
-- label: Examples Test # 15min
+- label: Examples Test # 25min
   working_dir: "/vllm-workspace/examples"
   #mirror_hardwares: [amd]
   source_file_dependencies:
   - vllm/entrypoints
   - examples/
   commands:
-    - pip install awscli tensorizer # for llava example and tensorizer test
+    - pip install tensorizer # for tensorizer test
    - python3 offline_inference.py
    - python3 cpu_offload.py
    - python3 offline_inference_chat.py
@@ -191,10 +198,13 @@ steps:
    - python3 offline_inference_vision_language_multi_image.py
    - python3 tensorize_vllm_model.py --model facebook/opt-125m serialize --serialized-directory /tmp/ --suffix v1 && python3 tensorize_vllm_model.py --model facebook/opt-125m deserialize --path-to-tensors /tmp/vllm/facebook/opt-125m/v1/model.tensors
    - python3 offline_inference_encoder_decoder.py
-    - python3 offline_profile.py --model facebook/opt-125m
+    - python3 offline_inference_classification.py
+    - python3 offline_inference_embedding.py
+    - python3 offline_inference_scoring.py
+    - python3 offline_profile.py --model facebook/opt-125m run_num_steps --num-steps 2
 
 - label: Prefix Caching Test # 9min
-  #mirror_hardwares: [amd]
+  mirror_hardwares: [amd]
   source_file_dependencies:
   - vllm/
   - tests/prefix_caching
@@ -230,7 +240,7 @@ steps:
   source_file_dependencies:
   - vllm/lora
   - tests/lora
-  command: pytest -v -s lora --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT --ignore lora/test_long_context.py lora/test_chatglm3_tp.py lora/test_llama_tp.py
+  command: pytest -v -s lora --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT --ignore=lora/test_long_context.py --ignore=lora/test_chatglm3_tp.py --ignore=lora/test_llama_tp.py
   parallelism: 4
 
 - label: "PyTorch Fullgraph Smoke Test" # 9min
@@ -314,7 +324,7 @@ steps:
 
 ##### models test #####
 
-- label: Basic Models Test # 30min
+- label: Basic Models Test # 24min
   source_file_dependencies:
   - vllm/
   - tests/models
@@ -324,7 +334,7 @@ steps:
   - pytest -v -s models/test_registry.py
   - pytest -v -s models/test_initialization.py
 
-- label: Language Models Test (Standard) # 42min
+- label: Language Models Test (Standard) # 32min
   #mirror_hardwares: [amd]
   source_file_dependencies:
   - vllm/
@@ -334,9 +344,8 @@ steps:
   commands:
   - pytest -v -s models/decoder_only/language -m 'core_model or quant_model'
   - pytest -v -s models/embedding/language -m core_model
-  - pytest -v -s models/embedding/vision_language -m core_model
 
-- label: Language Models Test (Extended) # 50min
+- label: Language Models Test (Extended) # 1h10min
   optional: true
   source_file_dependencies:
   - vllm/
@@ -346,9 +355,8 @@ steps:
   commands:
   - pytest -v -s models/decoder_only/language -m 'not core_model and not quant_model'
   - pytest -v -s models/embedding/language -m 'not core_model'
-  - pytest -v -s models/embedding/vision_language -m 'not core_model'
 
-- label: Multi-Modal Models Test (Standard) # 26min
+- label: Multi-Modal Models Test (Standard) # 28min
   #mirror_hardwares: [amd]
   source_file_dependencies:
   - vllm/
@@ -357,12 +365,14 @@ steps:
   - tests/models/embedding/vision_language
   - tests/models/encoder_decoder/vision_language
   commands:
+  - pip install git+https://github.com/TIGER-AI-Lab/Mantis.git
   - pytest -v -s models/decoder_only/audio_language -m 'core_model or quant_model'
   - pytest -v -s --ignore models/decoder_only/vision_language/test_phi3v.py models/decoder_only/vision_language -m 'core_model or quant_model'
+  - pytest -v -s models/embedding/vision_language -m core_model
   - pytest -v -s models/encoder_decoder/language -m core_model
   - pytest -v -s models/encoder_decoder/vision_language -m core_model
 
-- label: Multi-Modal Models Test (Extended) # 1h15m
+- label: Multi-Modal Models Test (Extended) 1 # 1h16m
   optional: true
   source_file_dependencies:
   - vllm/
@@ -371,14 +381,26 @@ steps:
   - tests/models/embedding/vision_language
   - tests/models/encoder_decoder/vision_language
   commands:
+  - pip install git+https://github.com/TIGER-AI-Lab/Mantis.git
   - pytest -v -s models/decoder_only/audio_language -m 'not core_model and not quant_model'
+  - pytest -v -s models/decoder_only/vision_language/test_models.py -m 'split(group=0) and not core_model and not quant_model'
   # HACK - run phi3v tests separately to sidestep this transformers bug
   # https://github.com/huggingface/transformers/issues/34307
   - pytest -v -s models/decoder_only/vision_language/test_phi3v.py
-  - pytest -v -s --ignore models/decoder_only/vision_language/test_phi3v.py models/decoder_only/vision_language -m 'not core_model and not quant_model'
+  - pytest -v -s --ignore models/decoder_only/vision_language/test_models.py --ignore models/decoder_only/vision_language/test_phi3v.py models/decoder_only/vision_language -m 'not core_model and not quant_model'
+  - pytest -v -s models/embedding/vision_language -m 'not core_model'
   - pytest -v -s models/encoder_decoder/language -m 'not core_model'
   - pytest -v -s models/encoder_decoder/vision_language -m 'not core_model'
 
+- label: Multi-Modal Models Test (Extended) 2 # 38m
+  optional: true
+  source_file_dependencies:
+  - vllm/
+  - tests/models/decoder_only/vision_language
+  commands:
+  - pip install git+https://github.com/TIGER-AI-Lab/Mantis.git
+  - pytest -v -s models/decoder_only/vision_language/test_models.py -m 'split(group=1) and not core_model and not quant_model'
+
 # This test is used only in PR development phase to test individual models and should never run on main
 - label: Custom Models Test
   optional: true
@@ -413,11 +435,11 @@ steps:
   - tests/distributed/
   commands:
   - # the following commands are for the first node, with ip 192.168.10.10 (ray environment already set up)
-  - VLLM_TEST_SAME_HOST=0 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_same_node.py | grep -q 'Same node test passed'
+  - VLLM_TEST_SAME_HOST=0 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_same_node.py | grep 'Same node test passed'
   - VLLM_MULTI_NODE=1 pytest -v -s distributed/test_multi_node_assignment.py
   - VLLM_MULTI_NODE=1 pytest -v -s distributed/test_pipeline_parallel.py
   - # the following commands are for the second node, with ip 192.168.10.11 (ray environment already set up)
-  - VLLM_TEST_SAME_HOST=0 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_same_node.py | grep -q 'Same node test passed'
+  - VLLM_TEST_SAME_HOST=0 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_same_node.py | grep 'Same node test passed'
 
 - label: Distributed Tests (2 GPUs) # 40min
   #mirror_hardwares: [amd]
@@ -430,19 +452,23 @@ steps:
   - vllm/model_executor/models/
   - tests/distributed/
   - vllm/compilation
+  - vllm/worker/worker_base.py
+  - vllm/worker/worker.py
+  - vllm/worker/model_runner.py
   commands:
   - pytest -v -s ./compile/test_basic_correctness.py
   - pytest -v -s ./compile/test_wrapper.py
-  - VLLM_TEST_SAME_HOST=1 torchrun --nproc-per-node=4 distributed/test_same_node.py | grep -q 'Same node test passed'
-  - TARGET_TEST_SUITE=L4 pytest basic_correctness/ -v -s -m distributed_2_gpus
+  - VLLM_TEST_SAME_HOST=1 torchrun --nproc-per-node=4 distributed/test_same_node.py | grep 'Same node test passed'
+  - TARGET_TEST_SUITE=L4 pytest basic_correctness/ -v -s -m 'distributed(num_gpus=2)'
   # Avoid importing model tests that cause CUDA reinitialization error
   - pytest models/encoder_decoder/language/test_bart.py -v -s -m 'distributed(num_gpus=2)'
   - pytest models/encoder_decoder/vision_language/test_broadcast.py -v -s -m 'distributed(num_gpus=2)'
   - pytest models/decoder_only/vision_language/test_models.py -v -s -m 'distributed(num_gpus=2)'
   - pytest -v -s spec_decode/e2e/test_integration_dist_tp2.py
   - pip install -e ./plugins/vllm_add_dummy_model
   - pytest -v -s distributed/test_distributed_oot.py
   - CUDA_VISIBLE_DEVICES=0,1 pytest -v -s test_sharded_state_loader.py
+  - CUDA_VISIBLE_DEVICES=0,1 pytest -v -s kv_transfer/disagg_test.py
 
 - label: Multi-step Tests (4 GPUs) # 36min
   working_dir: "/vllm-workspace/tests"
@@ -477,7 +503,6 @@ steps:
 
 - label: LoRA TP Test (Distributed)
   num_gpus: 4
-  soft_fail: true
   source_file_dependencies:
   - vllm/lora
   - tests/lora
@@ -528,7 +553,7 @@ steps:
   # see https://github.com/vllm-project/vllm/pull/5689 for details
   - pytest -v -s distributed/test_custom_all_reduce.py
   - torchrun --nproc_per_node=2 distributed/test_ca_buffer_sharing.py
-  - TARGET_TEST_SUITE=A100 pytest basic_correctness/ -v -s -m distributed_2_gpus
+  - TARGET_TEST_SUITE=A100 pytest basic_correctness/ -v -s -m 'distributed(num_gpus=2)'
   - pytest -v -s -x lora/test_mixtral.py
 
 - label: LM Eval Large Models # optional

diff --git a/.github/workflows/lint-and-deploy.yaml b/.github/workflows/lint-and-deploy.yaml
new file mode 100644
index 0000000000000..ab6f6e5d2060d
--- /dev/null
+++ b/.github/workflows/lint-and-deploy.yaml
@@ -0,0 +1,81 @@
+name: Lint and Deploy Charts
+
+on: pull_request
+
+jobs:
+  lint-and-deploy:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          fetch-depth: 0
+
+      - name: Set up Helm
+        uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814 # v4.2.0
+        with:
+          version: v3.14.4
+
+      #Python is required because ct lint runs Yamale and yamllint which require Python.
+      - uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0
+        with:
+          python-version: '3.13'
+
+      - name: Set up chart-testing
+        uses: helm/chart-testing-action@e6669bcd63d7cb57cb4380c33043eebe5d111992 # v2.6.1
+        with:
+          version: v3.10.1
+
+      - name: Run chart-testing (lint)
+        run: ct lint --target-branch ${{ github.event.repository.default_branch }} --chart-dirs examples/chart-helm --charts examples/chart-helm
+
+      - name: Setup minio
+        run: |
+          docker network create vllm-net
+          docker run -d -p 9000:9000 --name minio --net vllm-net \
+            -e "MINIO_ACCESS_KEY=minioadmin" \
+            -e "MINIO_SECRET_KEY=minioadmin" \
+            -v /tmp/data:/data \
+            -v /tmp/config:/root/.minio \
+            minio/minio server /data
+          export AWS_ACCESS_KEY_ID=minioadmin
+          export AWS_SECRET_ACCESS_KEY=minioadmin
+          export AWS_EC2_METADATA_DISABLED=true
+          mkdir opt-125m
+          cd opt-125m && curl -O -Ls "https://huggingface.co/facebook/opt-125m/resolve/main/{pytorch_model.bin,config.json,generation_config.json,merges.txt,special_tokens_map.json,tokenizer_config.json,vocab.json}" && cd ..
+          aws --endpoint-url http://127.0.0.1:9000/ s3 mb s3://testbucket
+          aws --endpoint-url http://127.0.0.1:9000/ s3 cp opt-125m/ s3://testbucket/opt-125m --recursive
+
+      - name: Create kind cluster
+        uses: helm/kind-action@0025e74a8c7512023d06dc019c617aa3cf561fde # v1.10.0
+
+      - name: Build the Docker image vllm cpu
+        run: docker buildx build -f Dockerfile.cpu -t vllm-cpu-env .
+
+      - name: Configuration of docker images, network and namespace for the kind cluster
+        run: |
+          docker pull amazon/aws-cli:2.6.4
+          kind load docker-image amazon/aws-cli:2.6.4 --name chart-testing
+          kind load docker-image vllm-cpu-env:latest --name chart-testing
+          docker network connect vllm-net "$(docker ps -aqf "name=chart-testing-control-plane")"
+          kubectl create ns ns-vllm
+
+      - name: Run chart-testing (install)
+        run: |
+          export AWS_ACCESS_KEY_ID=minioadmin
+          export AWS_SECRET_ACCESS_KEY=minioadmin
+          helm install --wait --wait-for-jobs --timeout 5m0s --debug --create-namespace --namespace=ns-vllm test-vllm examples/chart-helm -f examples/chart-helm/values.yaml --set secrets.s3endpoint=http://minio:9000 --set secrets.s3bucketname=testbucket --set secrets.s3accesskeyid=$AWS_ACCESS_KEY_ID --set secrets.s3accesskey=$AWS_SECRET_ACCESS_KEY --set resources.requests.cpu=1 --set resources.requests.memory=4Gi --set resources.limits.cpu=2 --set resources.limits.memory=5Gi --set image.env[0].name=VLLM_CPU_KVCACHE_SPACE --set image.env[1].name=VLLM_LOGGING_LEVEL --set-string image.env[0].value="1" --set-string image.env[1].value="DEBUG" --set-string extraInit.s3modelpath="opt-125m/" --set-string 'resources.limits.nvidia\.com/gpu=0' --set-string 'resources.requests.nvidia\.com/gpu=0' --set-string image.repository="vllm-cpu-env"
+
+      - name: curl test
+        run: |
+          kubectl -n ns-vllm port-forward service/test-vllm-service 8001:80 &
+          sleep 10
+          CODE="$(curl -v -f --location http://localhost:8001/v1/completions \
+            --header "Content-Type: application/json" \
+            --data '{
+              "model": "opt-125m",
+              "prompt": "San Francisco is a",
+              "max_tokens": 7,
+              "temperature": 0
+            }'):$CODE"
+          echo "$CODE"
\ No newline at end of file

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 882d4412632a5..bf19b3d227171 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -196,6 +196,7 @@ set(VLLM_EXT_SRC
   "csrc/quantization/gptq/q_gemm.cu"
   "csrc/quantization/compressed_tensors/int8_quant_kernels.cu"
   "csrc/quantization/fp8/common.cu"
+  "csrc/quantization/fused_kernels/fused_layernorm_dynamic_per_token_quant.cu"
   "csrc/quantization/gguf/gguf_kernel.cu"
   "csrc/cuda_utils_kernels.cu"
   "csrc/prepare_inputs/advance_step.cu"
@@ -300,7 +301,7 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
   #
   # For the cutlass_scaled_mm kernels we want to build the c2x (CUTLASS 2.x)
   # kernels for the remaining archs that are not already built for 3x.
-  cuda_archs_loose_intersection(SCALED_MM_2X_ARCHS 
+  cuda_archs_loose_intersection(SCALED_MM_2X_ARCHS
     "7.5;8.0;8.6;8.7;8.9;9.0" "${CUDA_ARCHS}")
   # subtract out the archs that are already built for 3x
   list(REMOVE_ITEM SCALED_MM_2X_ARCHS ${SCALED_MM_3X_ARCHS})
@@ -522,7 +523,7 @@ else()
   FetchContent_Declare(
     vllm-flash-attn
     GIT_REPOSITORY https://github.com/vllm-project/flash-attention.git
-    GIT_TAG 5259c586c403a4e4d8bf69973c159b40cc346fb9
+    GIT_TAG 04325b6798bcc326c86fb35af62d05a9c8c8eceb
     GIT_PROGRESS TRUE
     # Don't share the vllm-flash-attn build between build types
     BINARY_DIR ${CMAKE_BINARY_DIR}/vllm-flash-attn

diff --git a/Dockerfile b/Dockerfile
index 682f046d4b6ec..123703848749c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -11,6 +11,7 @@ ARG CUDA_VERSION=12.4.1
 FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu20.04 AS base
 ARG CUDA_VERSION=12.4.1
 ARG PYTHON_VERSION=3.12
+ARG TARGETPLATFORM
 ENV DEBIAN_FRONTEND=noninteractive
 
 # Install Python and other dependencies
@@ -46,9 +47,14 @@ WORKDIR /workspace
 # install build and runtime dependencies
 COPY requirements-common.txt requirements-common.txt
 COPY requirements-cuda.txt requirements-cuda.txt
+COPY requirements-cuda-arm64.txt requirements-cuda-arm64.txt
 RUN --mount=type=cache,target=/root/.cache/pip \
     python3 -m pip install -r requirements-cuda.txt
+RUN --mount=type=cache,target=/root/.cache/pip \
+    if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
+        python3 -m pip install -r requirements-cuda-arm64.txt; \
+    fi
 
 # cuda arch list used by torch
 # can be useful for both `dev` and `test`
@@ -63,6 +69,7 @@ ENV VLLM_FA_CMAKE_GPU_ARCHES=${vllm_fa_cmake_gpu_arches}
 
 #################### WHEEL BUILD IMAGE ####################
 FROM base AS build
+ARG TARGETPLATFORM
 
 # install build dependencies
 COPY requirements-build.txt requirements-build.txt
@@ -70,6 +77,11 @@ COPY requirements-build.txt requirements-build.txt
 RUN --mount=type=cache,target=/root/.cache/pip \
     python3 -m pip install -r requirements-build.txt
 
+RUN --mount=type=cache,target=/root/.cache/pip \
+    if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
+        python3 -m pip install -r requirements-cuda-arm64.txt; \
+    fi
+
 COPY . .
 ARG GIT_REPO_CHECK=0
 RUN --mount=type=bind,source=.git,target=.git \
@@ -134,8 +146,8 @@ COPY requirements-test.txt requirements-test.txt
 COPY requirements-dev.txt requirements-dev.txt
 RUN --mount=type=cache,target=/root/.cache/pip \
     python3 -m pip install -r requirements-dev.txt
-
 #################### DEV IMAGE ####################
+
 #################### vLLM installation IMAGE ####################
 # image with vLLM installed
 FROM nvidia/cuda:${CUDA_VERSION}-base-ubuntu22.04 AS vllm-base
@@ -143,6 +155,9 @@ ARG CUDA_VERSION=12.4.1
 ARG PYTHON_VERSION=3.12
 WORKDIR /vllm-workspace
 ENV DEBIAN_FRONTEND=noninteractive
+ARG TARGETPLATFORM
+
+COPY requirements-cuda-arm64.txt requirements-cuda-arm64.txt
 
 RUN PYTHON_VERSION_STR=$(echo ${PYTHON_VERSION} | sed 's/\.//g') && \
     echo "export PYTHON_VERSION_STR=${PYTHON_VERSION_STR}" >> /etc/environment
@@ -168,18 +183,25 @@ RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \
 # or future versions of triton.
 RUN ldconfig /usr/local/cuda-$(echo $CUDA_VERSION | cut -d. -f1,2)/compat/
 
-# install vllm wheel first, so that torch etc will be installed
+# Install vllm wheel first, so that torch etc will be installed.
 RUN --mount=type=bind,from=build,src=/workspace/dist,target=/vllm-workspace/dist \
     --mount=type=cache,target=/root/.cache/pip \
     python3 -m pip install dist/*.whl --verbose
 
 RUN --mount=type=cache,target=/root/.cache/pip \
-    . /etc/environment && \
-    python3 -m pip install https://github.com/flashinfer-ai/flashinfer/releases/download/v0.1.6/flashinfer-0.1.6+cu121torch2.4-cp${PYTHON_VERSION_STR}-cp${PYTHON_VERSION_STR}-linux_x86_64.whl
+    if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
+        pip uninstall -y torch && \
+        python3 -m pip install -r requirements-cuda-arm64.txt; \
+    fi
+
+RUN --mount=type=cache,target=/root/.cache/pip \
+. /etc/environment && \
+if [ "$TARGETPLATFORM" != "linux/arm64" ]; then \
+    python3 -m pip install https://github.com/flashinfer-ai/flashinfer/releases/download/v0.1.6/flashinfer-0.1.6+cu121torch2.4-cp${PYTHON_VERSION_STR}-cp${PYTHON_VERSION_STR}-linux_x86_64.whl; \
+fi
 COPY examples examples
 #################### vLLM installation IMAGE ####################
 
 #################### TEST IMAGE ####################
 # image to run unit testing suite
 # note that this uses vllm installed by `pip`
@@ -209,7 +231,6 @@ COPY vllm/v1 /usr/local/lib/python3.12/dist-packages/vllm/v1
 RUN mkdir test_docs
 RUN mv docs test_docs/
 RUN mv vllm test_docs/
-
 #################### TEST IMAGE ####################
 
 #################### OPENAI API SERVER ####################
@@ -218,8 +239,11 @@ FROM vllm-base AS vllm-openai
 
 # install additional dependencies for openai api server
 RUN --mount=type=cache,target=/root/.cache/pip \
-    pip install accelerate hf_transfer 'modelscope!=1.15.0' 'bitsandbytes>=0.44.0' timm==0.9.10
-
+    if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
+        pip install accelerate hf_transfer 'modelscope!=1.15.0' 'bitsandbytes>=0.42.0' 'timm==0.9.10'; \
+    else \
+        pip install accelerate hf_transfer 'modelscope!=1.15.0' 'bitsandbytes>=0.45.0' 'timm==0.9.10'; \
+    fi
 ENV VLLM_USAGE_SOURCE production-docker-image
 
 ENTRYPOINT ["python3", "-m", "vllm.entrypoints.openai.api_server"]

diff --git a/Dockerfile.neuron b/Dockerfile.neuron
index 76dbd4c04d3f3..77162bc82de62 100644
--- a/Dockerfile.neuron
+++ b/Dockerfile.neuron
@@ -1,5 +1,6 @@
 # default base image
-ARG BASE_IMAGE="public.ecr.aws/neuron/pytorch-inference-neuronx:2.1.2-neuronx-py310-sdk2.20.0-ubuntu20.04"
+# https://gallery.ecr.aws/neuron/pytorch-inference-neuronx
+ARG BASE_IMAGE="public.ecr.aws/neuron/pytorch-inference-neuronx:2.1.2-neuronx-py310-sdk2.20.2-ubuntu20.04"
 
 FROM $BASE_IMAGE

diff --git a/README.md b/README.md
index cfeb24cbb5823..93b71ddaccc61 100644
--- a/README.md
+++ b/README.md
@@ -16,6 +16,7 @@ Easy, fast, and cheap LLM serving for everyone
 ---
 
 *Latest News* 🔥
+- [2024/12] vLLM joins [pytorch ecosystem](https://pytorch.org/blog/vllm-joins-pytorch)! Easy, Fast, and Cheap LLM Serving for Everyone!
 - [2024/11] We hosted [the seventh vLLM meetup](https://lu.ma/h0qvrajz) with Snowflake! Please find the meetup slides from vLLM team [here](https://docs.google.com/presentation/d/1e3CxQBV3JsfGp30SwyvS3eM_tW-ghOhJ9PAJGK6KR54/edit?usp=sharing), and Snowflake team [here](https://docs.google.com/presentation/d/1qF3RkDAbOULwz9WK5TOltt2fE9t6uIc_hVNLFAaQX6A/edit?usp=sharing).
 - [2024/10] We have just created a developer slack ([slack.vllm.ai](https://slack.vllm.ai)) focusing on coordinating contributions and discussing features. Please feel free to join us there!
 - [2024/10] Ray Summit 2024 held a special track for vLLM! Please find the opening talk slides from the vLLM team [here](https://docs.google.com/presentation/d/1B_KQxpHBTRa_mDF-tR6i8rWdOU5QoTZNcEg2MKZxEHM/edit?usp=sharing). Learn more from the [talks](https://www.youtube.com/playlist?list=PLzTswPQNepXl6AQwifuwUImLPFRVpksjR) from other vLLM contributors and users!
@@ -133,3 +134,7 @@ If you use vLLM for your research, please cite our [paper](https://arxiv.org/abs
 * For coordinating contributions and development, please use Slack.
 * For security disclosures, please use Github's security advisory feature.
 * For collaborations and partnerships, please contact us at vllm-questions AT lists.berkeley.edu.
+
+## Media Kit
+
+* If you wish to use vLLM's logo, please refer to [our media kit repo](https://github.com/vllm-project/media-kit).

diff --git a/benchmarks/backend_request_func.py b/benchmarks/backend_request_func.py
index c3fed56e8a956..b67849038cf0d 100644
--- a/benchmarks/backend_request_func.py
+++ b/benchmarks/backend_request_func.py
@@ -24,6 +24,7 @@ class RequestFuncInput:
     model: str
     best_of: int = 1
     logprobs: Optional[int] = None
+    extra_body: Optional[dict] = None
     multi_modal_content: Optional[dict] = None
     ignore_eos: bool = False
 
@@ -36,6 +37,7 @@ class RequestFuncOutput:
     ttft: float = 0.0  # Time to first token
     itl: List[float] = field(
         default_factory=list)  # List of inter-token latencies
+    tpot: float = 0.0  # avg next-token latencies
     prompt_len: int = 0
     error: str = ""
 
@@ -242,6 +244,8 @@ async def async_request_openai_completions(
         "stream": True,
         "ignore_eos": request_func_input.ignore_eos,
     }
+    if request_func_input.extra_body:
+        payload.update(request_func_input.extra_body)
     headers = {
         "Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}"
     }
@@ -336,6 +340,8 @@ async def async_request_openai_chat_completions(
         "stream": True,
         "ignore_eos": request_func_input.ignore_eos,
     }
+    if request_func_input.extra_body:
+        payload.update(request_func_input.extra_body)
     headers = {
         "Content-Type": "application/json",
         "Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}",

diff --git a/benchmarks/benchmark_guided.py b/benchmarks/benchmark_guided.py
new file mode 100644
index 0000000000000..1a0e62598bfcb
--- /dev/null
+++ b/benchmarks/benchmark_guided.py
@@ -0,0 +1,494 @@
+"""Benchmark guided decoding throughput."""
+import argparse
+import dataclasses
+import json
+import os
+import random
+import time
+from typing import List
+
+import datasets
+import pandas as pd
+import uvloop
+from transformers import AutoTokenizer, PreTrainedTokenizerBase
+
+from vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs
+from vllm.entrypoints.openai.api_server import (
+    build_async_engine_client_from_engine_args)
+from vllm.sampling_params import GuidedDecodingParams
+from vllm.utils import FlexibleArgumentParser, merge_async_iterators
+
+
+@dataclasses.dataclass
+class SampleRequest:
+    """A class representing a single inference request for benchmarking.
+
+    Attributes:
+        prompt: The input text prompt for the model.
+        multi_modal_data: Optional dictionary containing multi-modal data (e.g.
+            images).
+        prompt_len: The length of the prompt in tokens.
+        expected_output_len: The expected length of the output in tokens.
+    """
+    prompt: str
+    prompt_len: int
+    expected_output_len: int
+    schema: dict
+    structure_type: str = 'json'
+    completion: str = None
+
+
+def run_vllm(requests: List[SampleRequest],
+             engine_args: EngineArgs,
+             n: int,
+             guided_decoding_rate: float = 1.0,
+             warmup: bool = False) -> float:
+    from vllm import LLM, SamplingParams
+    llm = LLM(**vars(engine_args))
+
+    # Add the requests to the engine.
+    prompts: List[str] = []
+    sampling_params: List[SamplingParams] = []
+    # randomly select the indices of the requests that will use
+    # guided decoding
+    guided_decoding_req_idx = random.sample(
+        range(len(requests)), int(len(requests) * guided_decoding_rate))
+
+    if warmup:
+        print(">>>>> Running warmup with the first 5 prompts")
+        # We use the first 5 requests to warm up the FSM
+        # (warmup is skipped when using the xgrammar_bench dataset)
+        warmup_requests = requests[:5]
+        for i, request in enumerate(warmup_requests):
+            prompts.append(request.prompt)
+            sampling_params.append(
+                SamplingParams(
+                    n=n,
+                    temperature=1.0,
+                    top_p=1.0,
+                    ignore_eos=True,
+                    max_tokens=request.expected_output_len,
+                    guided_decoding=GuidedDecodingParams(json=request.schema)
+                    if guided_decoding_rate > 0 else None,
+                ))
+        llm.generate(prompts, sampling_params, use_tqdm=False)
+
+    print(">>>>> Benchmark started...")
+    prompts = []
+    sampling_params = []
+    for i, request in enumerate(requests):
+        prompts.append(request.prompt)
+        sampling_params.append(
+            SamplingParams(
+                n=n,
+                temperature=1.0,
+                top_p=1.0,
+                ignore_eos=True,
+                max_tokens=request.expected_output_len,
+                guided_decoding=GuidedDecodingParams(
+                    **{request.structure_type: request.schema})
+                if i in guided_decoding_req_idx else None,
+            ))
+
+    start = time.perf_counter()
+    outputs = llm.generate(prompts, sampling_params, use_tqdm=False)
+    ret = []
+    for output, request in zip(outputs, requests):
+        generated_text = output.outputs[0].text
+        ret.append({
+            "generated": generated_text,
+            "expected": request.completion
+        })
+    end = time.perf_counter()
+    return end - start, ret
+
+
+async def run_vllm_async(
+        requests: List[SampleRequest],
+        engine_args: AsyncEngineArgs,
+        n: int,
+        guided_decoding_rate: float = 1.0,
+        warmup: bool = False,
+        disable_frontend_multiprocessing: bool = False) -> float:
+    from vllm import SamplingParams
+
+    async with build_async_engine_client_from_engine_args(
+            engine_args, disable_frontend_multiprocessing) as llm:
+
+        # Add the requests to the engine.
+        prompts: List[str] = []
+        sampling_params: List[SamplingParams] = []
+        guided_decoding_req_idx = random.sample(
+            range(len(requests)), int(len(requests) * guided_decoding_rate))
+
+        if warmup:
+            print(">>>>> Running warmup with the first 5 prompts")
+            # We use the first 5 requests to warm up the FSM
+            # (warmup is skipped when using the xgrammar_bench dataset)
+            warmup_requests = requests[:5]
+            for i, request in enumerate(warmup_requests):
+                prompts.append(request.prompt)
+                sampling_params.append(
+                    SamplingParams(
+                        n=n,
+                        temperature=1.0,
+                        top_p=1.0,
+                        ignore_eos=True,
+                        max_tokens=request.expected_output_len,
+                        guided_decoding=GuidedDecodingParams(
+                            json=request.schema)
+                        if guided_decoding_rate > 0 else None,
+                    ))
+            generators = []
+            for i, (prompt, sp) in enumerate(zip(prompts, sampling_params)):
+                generator = llm.generate(prompt, sp, request_id=f"test{i}")
+                generators.append(generator)
+            all_gens = merge_async_iterators(*generators)
+            async for i, res in all_gens:
+                pass
+
+        print(">>>>> Benchmark started...")
+        prompts = []
+        sampling_params = []
+        for i, request in enumerate(requests):
+            prompts.append(request.prompt)
+            sampling_params.append(
+                SamplingParams(
+                    n=n,
+                    temperature=1.0,
+                    top_p=1.0,
+                    ignore_eos=True,
+                    max_tokens=request.expected_output_len,
+                    guided_decoding=GuidedDecodingParams(json=request.schema)
+                    if i in guided_decoding_req_idx else None,
+                ))
+
+        generators = []
+        start_time = []
+        latencies = []
+        start = time.perf_counter()
+        for i, (prompt, sp) in enumerate(zip(prompts, sampling_params)):
+            generator = llm.generate(prompt, sp, request_id=f"test{i}")
+            generators.append(generator)
+            start_time.append(time.perf_counter())
+            latencies.append([])
+        all_gens = merge_async_iterators(*generators)
+        generated_texts = [''] * len(requests)
+        async for i, res in all_gens:
+            generated_texts[i] = res.outputs[0].text
+            lat = time.perf_counter() - start_time[i]
+            latencies[i].append(lat)
+        ret = [{
+            'generated': gt,
+            'expected': req.completion
+        } for gt, req in zip(generated_texts, requests)]
+        end = time.perf_counter()
+        first_latency = pd.Series([lat[0] * 1000 for lat in latencies])
+        next_latency = pd.Series([(lat[-1] - lat[0]) / len(lat[1:]) * 1000
+                                  for lat in latencies])
+        return end - start, ret, (first_latency, next_latency)
+
+
+def sample_requests(tokenizer: PreTrainedTokenizerBase,
+                    args: argparse.Namespace) -> List[SampleRequest]:
+    if args.dataset == 'json':
+        if args.json_schema_path is None:
+            dir_path = os.path.dirname(os.path.realpath(__file__))
+            args.json_schema_path = os.path.join(dir_path,
+                                                 "structured_schemas",
+                                                 "structured_schema_1.json")
+        with open(args.json_schema_path) as f:
+            schema = json.load(f)
+        prompt = f"Generate an example of a user profile given the following schema: {json.dumps(schema)}"  # noqa: E501
+        input_len = len(tokenizer(prompt).input_ids)
+        print(f"Input length of the prompt: {input_len} tokens")
+        requests = [
+            SampleRequest(prompt=prompt,
+                          prompt_len=input_len,
+                          expected_output_len=args.output_len,
+                          schema=schema,
+                          structure_type=args.structure_type)
+            for _ in range(args.num_prompts)
+        ]
+
+    elif args.dataset == "grammar":
+        schema = """
+            ?start: select_statement
+
+            ?select_statement: "SELECT " column_list " FROM " table_name
+
+            ?column_list: column_name ("," column_name)*
+
+            ?table_name: identifier
+
+            ?column_name: identifier
+
+            ?identifier: /[a-zA-Z_][a-zA-Z0-9_]*/
+        """
+        prompt = "Generate an SQL query to show the 'username' \
+            and 'email' from the 'users' table."
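+        # The schema above is an EBNF-style grammar (as consumed by vLLM's
+        # guided_grammar support): decoding is constrained so that the
+        # output is derivable from the ?start rule, i.e. a
+        # "SELECT ... FROM ..." statement.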
+
+        input_len = len(tokenizer(prompt).input_ids)
+        print(f"Input length of the prompt: {input_len} tokens")
+        requests = [
+            SampleRequest(prompt=prompt,
+                          prompt_len=input_len,
+                          expected_output_len=args.output_len,
+                          schema=schema,
+                          structure_type=args.structure_type)
+            for _ in range(args.num_prompts)
+        ]
+
+    elif args.dataset == "regex":
+        regex = r"\w+@\w+\.com\n"
+        args.regex = regex
+        prompt = "Generate an email address for Alan Turing, \
+            who works in Enigma. End in .com and new line. \
+            Example result: alan.turing@enigma.com\n"
+
+        input_len = len(tokenizer(prompt).input_ids)
+        print(f"Input length of the prompt: {input_len} tokens")
+        requests = [
+            SampleRequest(prompt=prompt,
+                          prompt_len=input_len,
+                          expected_output_len=args.output_len,
+                          schema=regex,
+                          structure_type=args.structure_type)
+            for _ in range(args.num_prompts)
+        ]
+
+    elif args.dataset == "choice":
+        choice = ["Positive", "Negative"]
+        args.choice = choice
+        prompt = "Classify this sentiment: vLLM is wonderful!"
+        input_len = len(tokenizer(prompt).input_ids)
+        print(f"Input length of the prompt: {input_len} tokens")
+        requests = [
+            SampleRequest(prompt=prompt,
+                          prompt_len=input_len,
+                          expected_output_len=args.output_len,
+                          schema=choice,
+                          structure_type=args.structure_type)
+            for _ in range(args.num_prompts)
+        ]
+
+    elif args.dataset == "xgrammar_bench":
+        args.warmup = False
+        requests: List[SampleRequest] = []
+        dataset = datasets.load_dataset("NousResearch/json-mode-eval",
+                                        split="train")
+        print(f"dataset has {len(dataset)} entries")
+        len_dataset = len(dataset)
+        for data_point_idx in range(args.num_prompts):
+            idx = data_point_idx
+            while idx >= len_dataset:
+                idx -= len_dataset
+            schema = dataset["schema"][idx]
+            prompt = tokenizer.apply_chat_template(dataset["prompt"][idx],
+                                                   tokenize=False)
+            input_len = len(tokenizer(prompt).input_ids)
+            completion = dataset["completion"][idx]
+
+            requests.append(
+                SampleRequest(prompt=prompt,
+                              prompt_len=input_len,
+                              expected_output_len=args.output_len,
+                              schema=schema,
+                              completion=completion))
+
+    return requests
+
+
+def evaluate(ret, args):
+
+    def _eval_correctness_json(expected, actual):
+        # extract json string from string using regex
+        import re
+        actual = actual.replace('\n', '').replace(' ', '').strip()
+        try:
+            actual = re.search(r'\{.*\}', actual).group()
+            actual = json.loads(actual)
+        except Exception:
+            return False
+
+        return True
+
+    def _eval_correctness_choice(expected, actual):
+        return actual in args.choice
+
+    def _eval_correctness_regex(expected, actual):
+        import re
+        return re.match(args.regex, actual) is not None
+
+    def _eval_correctness(expected, actual):
+        if args.structure_type == 'json':
+            return _eval_correctness_json(expected, actual)
+        elif args.structure_type == 'regex':
+            return _eval_correctness_regex(expected, actual)
+        elif args.structure_type == 'choice':
+            return _eval_correctness_choice(expected, actual)
+        else:
+            return None
+
+    scores = []
+    for res in ret:
+        score = _eval_correctness(res['expected'], res['generated'])
+        res['correctness'] = score
+        scores.append(score)
+
+    not_none_scores = [score for score in scores if score is not None]
+
+    return (sum(not_none_scores) / len(not_none_scores) *
+            100) if len(not_none_scores) > 0 else None
+
+
+def main(args: argparse.Namespace):
+    print(args)
+    random.seed(args.seed)
+
+    # the async engine is only used with the 'json' dataset; force the
+    # synchronous engine for 'grammar', 'regex' and 'choice'
+    if args.dataset == 'grammar':
+        args.structure_type = 'grammar'
+        args.async_engine = False
+    elif args.dataset == 'regex':
+        args.structure_type = 'regex'
+        args.async_engine = False
+    elif args.dataset == 'choice':
+        args.structure_type = 'choice'
+        args.async_engine = False
+    else:
+        args.structure_type = 'json'
+
+    if args.no_guided_decoding:
+        args.guided_decoding_ratio = 0
+    if args.save_results:
+        result_file_name = f'{args.guided_decoding_ratio}guided'
+        result_file_name += f"_{args.model.split('/')[-1]}"
+        result_file_name += f"_{args.dataset}"
+        result_file_name += f"_{args.num_prompts}"
+        result_file_name += f"_out{args.output_len}"
+        result_file_name += f"_async{args.async_engine}"
+        result_file_name += f"_warmup{args.warmup}"
+        result_file_name += f"_chunkedprefill{args.enable_chunked_prefill}"
+        result_file_name += ".txt"
+    else:
+        result_file_name = None
+
+    # Load the tokenizer and sample the benchmark requests.
+    tokenizer = AutoTokenizer.from_pretrained(
+        args.tokenizer, trust_remote_code=args.trust_remote_code)
+    requests = sample_requests(tokenizer, args)
+
+    if args.async_engine:
+        engine_args = AsyncEngineArgs.from_cli_args(args)
+        elapsed_time, ret, (first_latency, next_latency) = uvloop.run(
+            run_vllm_async(requests, engine_args, args.n,
+                           args.guided_decoding_ratio, args.warmup,
+                           args.disable_frontend_multiprocessing))
+    else:
+        engine_args = EngineArgs.from_cli_args(args)
+        elapsed_time, ret = run_vllm(requests, engine_args, args.n,
+                                     args.guided_decoding_ratio, args.warmup)
+        first_latency, next_latency = None, None
+
+    score = evaluate(ret, args)
+    total_num_tokens = sum(request.prompt_len + request.expected_output_len
+                           for request in requests)
+    total_output_tokens = sum(request.expected_output_len
+                              for request in requests)
+    if first_latency is not None:
+        latency_breakdown = "\nFirst token latency(msecs):\n"
+        latency_breakdown += f"{first_latency.describe()}"
+        latency_breakdown += "\nNext token latency(msecs):\n"
+        latency_breakdown += f"{next_latency.describe()}"
+    print(
+        f"Throughput: {len(requests) / elapsed_time:.2f} requests/s, "
+        f"{total_num_tokens / elapsed_time:.2f} total tokens/s, "
+        f"{total_output_tokens / elapsed_time:.2f} output tokens/s",
+        f"Correct rate is {score} %",
+        f"{latency_breakdown if first_latency is not None else ''}")
+
+    # Output JSON results if specified
+    if args.output_json or result_file_name:
+        results = {
+            "elapsed_time": elapsed_time,
+            "num_requests": len(requests),
+            "total_num_tokens": total_num_tokens,
+            "total_output_tokens": total_output_tokens,
+            "requests_per_second": len(requests) / elapsed_time,
+            "tokens_per_second": f"{total_num_tokens / elapsed_time:.2f}",
+            "output_tokens_per_second":
+            f"{total_output_tokens / elapsed_time:.2f}",
+            "correct_rate(%)": score
+        }
+        results = {"outputs": ret, **results}
+        if first_latency is not None:
+            results["first_token_latency(msecs)"] = first_latency.describe(
+            ).to_dict()
+            results["next_token_latency(msecs)"] = next_latency.describe(
+            ).to_dict()
+        if args.output_json:
+            with open(args.output_json, "w") as f:
+                json.dump(results, f, indent=4)
+        elif result_file_name:
+            with open(result_file_name, "w") as f:
+                json.dump(results, f, indent=4)
+
+
+if __name__ == "__main__":
+    parser = FlexibleArgumentParser(description="Benchmark guided decoding.")
+    parser = AsyncEngineArgs.add_cli_args(parser)
+
+    parser.add_argument("--output-len",
+                        type=int,
+                        default=512,
+                        help="Output length for each request. Overrides the "
+                        "output length from the dataset.")
+    parser.add_argument(
+        "--dataset",
+        default='json',
+        choices=['json', 'grammar', 'regex', 'choice', 'xgrammar_bench'])
+    parser.add_argument("--json_schema_path",
+                        type=str,
+                        default=None,
+                        help="Path to json schema.")
+    parser.add_argument("--n",
+                        type=int,
+                        default=1,
+                        help="Number of generated sequences per prompt.")
+    parser.add_argument("--num-prompts",
+                        type=int,
+                        default=10,
+                        help="Number of prompts to process.")
+    parser.add_argument(
+        '--output-json',
+        type=str,
+        default=None,
+        help='Path to save the throughput results in JSON format.')
+    parser.add_argument("--async-engine",
+                        action='store_true',
+                        default=False,
+                        help="Use vLLM async engine rather than LLM class.")
+    parser.add_argument("--no-guided-decoding",
+                        action='store_true',
+                        default=False,
+                        help="Whether to disable guided decoding or not.")
+    parser.add_argument("--guided-decoding-ratio",
+                        type=float,
+                        default=1.0,
+                        help="Ratio of guided decoding requests.")
+    parser.add_argument("--disable-frontend-multiprocessing",
+                        action='store_true',
+                        default=False,
+                        help="Disable decoupled async engine frontend.")
+    parser.add_argument("--warmup",
+                        action="store_true",
+                        default=False,
+                        help="Run warmup prompts before the benchmark.")
+    parser.add_argument("--save-results",
+                        action="store_true",
+                        default=False,
+                        help="Save output results.")
+    args = parser.parse_args()
+    if args.tokenizer is None:
+        args.tokenizer = args.model
+    main(args)
diff --git a/benchmarks/benchmark_serving.py b/benchmarks/benchmark_serving.py
index e9fc037a46965..4eb0e1f8ac903 100644
--- a/benchmarks/benchmark_serving.py
+++ b/benchmarks/benchmark_serving.py
@@ -199,6 +199,56 @@ def sample_sonnet_requests(
     return sampled_requests
 
 
+def sample_mmmu_pro_vision_requests(
+    dataset,
+    num_requests: int,
+    tokenizer: PreTrainedTokenizerBase,
+    fixed_output_len: Optional[int] = None,
+) -> List[Tuple[str, str, int, Optional[Dict[str, Collection[str]]]]]:
+    sampled_requests: List[Tuple[str, int, int, Dict[str,
+                                                     Collection[str]]]] = []
+    for data in dataset:
+        if len(sampled_requests) == num_requests:
+            break
+
+        # MMMU-Pro vision direct prompt
+        # Ref: https://github.com/MMMU-Benchmark/MMMU/blob/6ce42f4d8f70c1841c67867152648974415b5cac/mmmu-pro/prompts.yaml#L5
+        prompt = (
+            "Answer with the option letter from the given choices directly. "
+            "The last line of your response should be of the following "
+            "format: 'Answer: $LETTER' (without quotes) where LETTER is one of "
+            "options.")
+
+        prompt_token_ids = tokenizer(prompt).input_ids
+        if fixed_output_len is None:
+            # Default max output len is set to 128
+            print("--hf-output-len is not provided. Using default value 128.")
+            fixed_output_len = 128
+
+        prompt_len = len(prompt_token_ids)
+        output_len = fixed_output_len
+
+        assert isinstance(
+            data["image"],
+            Image), ("Input image format must be `PIL.Image.Image`, "
+                     f"given {type(data['image'])}.")
+        image: Image = data["image"]
+        image = image.convert("RGB")
+        image_data = io.BytesIO()
+        image.save(image_data, format='JPEG')
+        image_base64 = base64.b64encode(image_data.getvalue()).decode("utf-8")
+        mm_content = {
+            "type": "image_url",
+            "image_url": {
+                "url": f"data:image/jpeg;base64,{image_base64}"
+            },
+        }
+
+        sampled_requests.append((prompt, prompt_len, output_len, mm_content))
+
+    return sampled_requests
+
+
 def sample_hf_requests(
     dataset_path: str,
     dataset_subset: str,
@@ -208,6 +258,21 @@ def sample_hf_requests(
     random_seed: int,
     fixed_output_len: Optional[int] = None,
 ) -> List[Tuple[str, str, int, Optional[Dict[str, Collection[str]]]]]:
+
+    # Special case for MMMU-Pro vision dataset
+    if dataset_path == 'MMMU/MMMU_Pro' and dataset_subset == 'vision':
+        assert dataset_split == "test"
+        dataset = load_dataset(dataset_path,
+                               name=dataset_subset,
+                               split=dataset_split,
+                               streaming=True)
+        assert "image" in dataset.features, (
+            "MMMU/MMMU_Pro vision dataset must have 'image' column.")
+        filter_func = lambda x: isinstance(x["image"], Image)
+        dataset = dataset.shuffle(seed=random_seed).filter(filter_func)
+        return sample_mmmu_pro_vision_requests(dataset, num_requests,
+                                               tokenizer, fixed_output_len)
+
     dataset = load_dataset(dataset_path,
                            name=dataset_subset,
                            split=dataset_split,
@@ -716,6 +781,7 @@ def main(args: argparse.Namespace):
     backend = args.backend
     model_id = args.model
     tokenizer_id = args.tokenizer if args.tokenizer is not None else args.model
+    tokenizer_mode = args.tokenizer_mode
 
     if args.base_url is not None:
         api_url = f"{args.base_url}{args.endpoint}"
@@ -725,6 +791,7 @@
         base_url = f"http://{args.host}:{args.port}"
 
     tokenizer = get_tokenizer(tokenizer_id,
+                              tokenizer_mode=tokenizer_mode,
                               trust_remote_code=args.trust_remote_code)
 
     if args.dataset is not None:
@@ -1145,5 +1212,15 @@ def main(args: argparse.Namespace):
         "from the sampled HF dataset.",
     )
 
+    parser.add_argument(
+        '--tokenizer-mode',
+        type=str,
+        default="auto",
+        choices=['auto', 'slow', 'mistral'],
+        help='The tokenizer mode.\n\n* "auto" will use the '
+        'fast tokenizer if available.\n* "slow" will '
+        'always use the slow tokenizer. \n* '
+        '"mistral" will always use the `mistral_common` tokenizer.')
+
     args = parser.parse_args()
     main(args)
diff --git a/benchmarks/benchmark_serving_guided.py b/benchmarks/benchmark_serving_guided.py
new file mode 100644
index 0000000000000..4435d87e18a8a
--- /dev/null
+++ b/benchmarks/benchmark_serving_guided.py
@@ -0,0 +1,881 @@
+r"""Benchmark online serving throughput with guided decoding.
+
+On the server side, run one of the following commands:
+    (vLLM OpenAI API server)
+    vllm serve <your_model> --disable-log-requests
+
+    (TGI backend)
+    ./launch_tgi_server.sh <your_model> <max_batch_total_tokens>
+
+On the client side, run:
+    python benchmarks/benchmark_serving_guided.py \
+        --backend <backend> \
+        --model <your_model> \
+        --dataset json \
+        --guided-decoding-ratio 1.0 \
+        --guided-decoding-backend xgrammar \
+        --request-rate 10 \
+        --num-prompts 1000
+
+    when using the tgi backend, add
+        --endpoint /generate_stream
+        to the end of the command above.
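+
+    Each request is sent as a normal OpenAI completions payload; for the
+    sampled requests that use guided decoding, the client additionally puts
+    the guided-decoding fields into the request body, roughly:
+        {"prompt": ..., "max_tokens": ...,
+         "guided_json": <schema>, "guided_decoding_backend": "xgrammar"}
+    (the exact key, one of guided_json / guided_regex / guided_choice /
+    guided_grammar, depends on --dataset; see prepare_extra_body below).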
+""" +import argparse +import asyncio +import dataclasses +import json +import os +import random +import time +import warnings +from dataclasses import dataclass +from typing import AsyncGenerator, List, Optional, Tuple + +import datasets +import numpy as np +import pandas as pd +from backend_request_func import (ASYNC_REQUEST_FUNCS, RequestFuncInput, + RequestFuncOutput) +from tqdm.asyncio import tqdm +from transformers import PreTrainedTokenizerBase + +try: + from vllm.transformers_utils.tokenizer import get_tokenizer +except ImportError: + from backend_request_func import get_tokenizer + +try: + from vllm.utils import FlexibleArgumentParser +except ImportError: + from argparse import ArgumentParser as FlexibleArgumentParser + +MILLISECONDS_TO_SECONDS_CONVERSION = 1000 + + +@dataclass +class BenchmarkMetrics: + completed: int + total_input: int + total_output: int + request_throughput: float + request_goodput: float + output_throughput: float + total_token_throughput: float + mean_ttft_ms: float + median_ttft_ms: float + std_ttft_ms: float + percentiles_ttft_ms: List[Tuple[float, float]] + mean_tpot_ms: float + median_tpot_ms: float + std_tpot_ms: float + percentiles_tpot_ms: List[Tuple[float, float]] + mean_itl_ms: float + median_itl_ms: float + std_itl_ms: float + percentiles_itl_ms: List[Tuple[float, float]] + # E2EL stands for end-to-end latency per request. + # It is the time taken on the client side from sending + # a request to receiving a complete response. + mean_e2el_ms: float + median_e2el_ms: float + std_e2el_ms: float + percentiles_e2el_ms: List[Tuple[float, float]] + + +@dataclasses.dataclass +class SampleRequest: + """A class representing a single inference request for benchmarking. + + Attributes: + prompt: The input text prompt for the model. + multi_modal_data: Optional dictionary containing multi-modal data (e.g. + images). + prompt_len: The length of the prompt in tokens. + expected_output_len: The expected length of the output in tokens. + """ + prompt: str + prompt_len: int + expected_output_len: int + schema: dict + structure_type: str + completion: str = None + + +def sample_requests(tokenizer: PreTrainedTokenizerBase, + args: argparse.Namespace) -> List[SampleRequest]: + if args.dataset == 'json': + if args.json_schema_path is None: + dir_path = os.path.dirname(os.path.realpath(__file__)) + args.json_schema_path = os.path.join(dir_path, + "structured_schemas", + "structured_schema_1.json") + with open(args.json_schema_path) as f: + schema = json.load(f) + prompt = f"Generate an example of a user profile given the following schema: {json.dumps(schema)}" # noqa: E501 + input_len = len(tokenizer(prompt).input_ids) + print(f"Input length of the prompt: {input_len} tokens") + requests = [ + SampleRequest(prompt=prompt, + prompt_len=input_len, + expected_output_len=args.output_len, + schema=schema, + structure_type=args.structure_type) + for _ in range(args.num_prompts) + ] + + elif args.dataset == "grammar": + schema = """ + ?start: select_statement + + ?select_statement: "SELECT " column_list " FROM " table_name + + ?column_list: column_name ("," column_name)* + + ?table_name: identifier + + ?column_name: identifier + + ?identifier: /[a-zA-Z_][a-zA-Z0-9_]*/ + """ + prompt = "Generate an SQL query to show the 'username' \ + and 'email' from the 'users' table." 
+ + input_len = len(tokenizer(prompt).input_ids) + print(f"Input length of the prompt: {input_len} tokens") + requests = [ + SampleRequest(prompt=prompt, + prompt_len=input_len, + expected_output_len=args.output_len, + schema=schema, + structure_type=args.structure_type) + for _ in range(args.num_prompts) + ] + + elif args.dataset == "regex": + regex = r"\w+@\w+\.com\n" + args.regex = regex + prompt = "Generate an email address for Alan Turing, \ + who works in Enigma. End in .com and new line. \ + Example result: alan.turing@enigma.com\n" + + input_len = len(tokenizer(prompt).input_ids) + print(f"Input length of the prompt: {input_len} tokens") + requests = [ + SampleRequest(prompt=prompt, + prompt_len=input_len, + expected_output_len=args.output_len, + schema=regex, + structure_type=args.structure_type) + for _ in range(args.num_prompts) + ] + + elif args.dataset == "choice": + choice = ["Positive", "Negative"] + args.choice = choice + prompt = "Classify this sentiment: vLLM is wonderful!" + input_len = len(tokenizer(prompt).input_ids) + print(f"Input length of the prompt: {input_len} tokens") + requests = [ + SampleRequest(prompt=prompt, + prompt_len=input_len, + expected_output_len=args.output_len, + schema=choice, + structure_type=args.structure_type) + for _ in range(args.num_prompts) + ] + + elif args.dataset == "xgrammar_bench": + requests: List[SampleRequest] = [] + dataset = datasets.load_dataset("NousResearch/json-mode-eval", + split="train") + print(f"dataset has {len(dataset)} entries") + len_dataset = len(dataset) + for data_point_idx in range(args.num_prompts): + idx = data_point_idx + while idx >= len_dataset: + idx -= len_dataset + schema = dataset["schema"][idx] + prompt = tokenizer.apply_chat_template(dataset["prompt"][idx], + tokenize=False) + input_len = len(tokenizer(prompt).input_ids) + completion = dataset["completion"][idx] + + requests.append( + SampleRequest(prompt=prompt, + prompt_len=input_len, + expected_output_len=args.output_len, + schema=schema, + structure_type=args.structure_type, + completion=completion)) + + return requests + + +async def get_request( + input_requests: List[SampleRequest], + request_rate: float, + burstiness: float = 1.0, +) -> AsyncGenerator[Tuple[int, SampleRequest], None]: + """ + Asynchronously generates requests at a specified rate + with OPTIONAL burstiness. + + Args: + input_requests: + A list of input requests, each represented as a tuple. + request_rate: + The rate at which requests are generated (requests/s). + burstiness (optional): + The burstiness factor of the request generation. + Only takes effect when request_rate is not inf. + Default value is 1, which follows a Poisson process. + Otherwise, the request intervals follow a gamma distribution. + A lower burstiness value (0 < burstiness < 1) results + in more bursty requests, while a higher burstiness value + (burstiness > 1) results in a more uniform arrival of requests. + """ + input_requests = iter(input_requests) + + # Calculate scale parameter theta to maintain the desired request_rate. + assert burstiness > 0, ( + f"A positive burstiness factor is expected, but given {burstiness}.") + theta = 1.0 / (request_rate * burstiness) + + for i, request in enumerate(input_requests): + yield i, request + + if request_rate == float("inf"): + # If the request rate is infinity, then we don't need to wait. + continue + + # Sample the request interval from the gamma distribution. + # If burstiness is 1, it follows exponential distribution. 
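+        # Worked example: request_rate=10 and burstiness=0.5 give
+        # theta = 1 / (10 * 0.5) = 0.2, so the mean interval is
+        # shape * theta = 0.5 * 0.2 = 0.1 s (the 10 req/s rate is preserved),
+        # while the variance shape * theta**2 = 0.02 is double that of the
+        # burstiness=1.0 Poisson case (1 * 0.1**2 = 0.01).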
+ interval = np.random.gamma(shape=burstiness, scale=theta) + # The next request will be sent after the interval. + await asyncio.sleep(interval) + + +def calculate_metrics( + input_requests: List[Tuple[str, int, int]], + outputs: List[RequestFuncOutput], + dur_s: float, + tokenizer: PreTrainedTokenizerBase, + selected_percentile_metrics: List[str], + selected_percentiles: List[float], +) -> Tuple[BenchmarkMetrics, List[int]]: + actual_output_lens: List[int] = [] + total_input = 0 + completed = 0 + good_completed = 0 + itls: List[float] = [] + tpots: List[float] = [] + all_tpots: List[float] = [] + ttfts: List[float] = [] + e2els: List[float] = [] + for i in range(len(outputs)): + if outputs[i].success: + # We use the tokenizer to count the number of output tokens for all + # serving backends instead of looking at len(outputs[i].itl) since + # multiple output tokens may be bundled together + # Note : this may inflate the output token count slightly + output_len = len( + tokenizer(outputs[i].generated_text, + add_special_tokens=False).input_ids) + actual_output_lens.append(output_len) + total_input += input_requests[i].prompt_len + tpot = 0 + if output_len > 1: + tpot = (outputs[i].latency - outputs[i].ttft) / (output_len - + 1) + tpots.append(tpot) + outputs[i].tpot = sum(tpots) / len(tpots) if len(tpots) else 0 + # Note: if output_len <= 1, we regard tpot as 0 for goodput + all_tpots.append(tpot) + itls += outputs[i].itl + ttfts.append(outputs[i].ttft) + e2els.append(outputs[i].latency) + completed += 1 + else: + actual_output_lens.append(0) + + if completed == 0: + warnings.warn( + "All requests failed. This is likely due to a misconfiguration " + "on the benchmark arguments.", + stacklevel=2) + metrics = BenchmarkMetrics( + completed=completed, + total_input=total_input, + total_output=sum(actual_output_lens), + request_throughput=completed / dur_s, + request_goodput=good_completed / dur_s, + output_throughput=sum(actual_output_lens) / dur_s, + total_token_throughput=(total_input + sum(actual_output_lens)) / dur_s, + mean_ttft_ms=np.mean(ttfts or 0) * + 1000, # ttfts is empty if streaming is not supported by backend + std_ttft_ms=np.std(ttfts or 0) * 1000, + median_ttft_ms=np.median(ttfts or 0) * 1000, + percentiles_ttft_ms=[(p, np.percentile(ttfts or 0, p) * 1000) + for p in selected_percentiles], + mean_tpot_ms=np.mean(tpots or 0) * 1000, + std_tpot_ms=np.std(tpots or 0) * 1000, + median_tpot_ms=np.median(tpots or 0) * 1000, + percentiles_tpot_ms=[(p, np.percentile(tpots or 0, p) * 1000) + for p in selected_percentiles], + mean_itl_ms=np.mean(itls or 0) * 1000, + std_itl_ms=np.std(itls or 0) * 1000, + median_itl_ms=np.median(itls or 0) * 1000, + percentiles_itl_ms=[(p, np.percentile(itls or 0, p) * 1000) + for p in selected_percentiles], + mean_e2el_ms=np.mean(e2els or 0) * 1000, + std_e2el_ms=np.std(e2els or 0) * 1000, + median_e2el_ms=np.median(e2els or 0) * 1000, + percentiles_e2el_ms=[(p, np.percentile(e2els or 0, p) * 1000) + for p in selected_percentiles], + ) + + return metrics, actual_output_lens + + +async def benchmark( + backend: str, + api_url: str, + base_url: str, + model_id: str, + tokenizer: PreTrainedTokenizerBase, + input_requests: List[SampleRequest], + request_rate: float, + burstiness: float, + disable_tqdm: bool, + profile: bool, + selected_percentile_metrics: List[str], + selected_percentiles: List[str], + ignore_eos: bool, + max_concurrency: Optional[int], + guided_decoding_ratio: float, + guided_decoding_backend: str, +): + if backend in ASYNC_REQUEST_FUNCS: + 
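+        # ASYNC_REQUEST_FUNCS (from backend_request_func.py) maps backend
+        # names such as "vllm" and "tgi" to their streaming request
+        # coroutines, so the rest of the benchmark is backend-agnostic.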
request_func = ASYNC_REQUEST_FUNCS[backend] + else: + raise ValueError(f"Unknown backend: {backend}") + + def prepare_extra_body(request) -> dict: + extra_body = {} + # Add the schema to the extra_body + extra_body[request.structure_type] = request.schema + # Add the specific guided_decoding_backend + extra_body["guided_decoding_backend"] = guided_decoding_backend + return extra_body + + print("Starting initial single prompt test run...") + guided_decoding_req_idx = random.sample( + range(len(input_requests)), + int(len(input_requests) * guided_decoding_ratio)) + + test_request = input_requests[0] + test_input = RequestFuncInput( + model=model_id, + prompt=test_request.prompt, + api_url=api_url, + prompt_len=test_request.prompt_len, + output_len=test_request.expected_output_len, + ignore_eos=ignore_eos, + extra_body=prepare_extra_body(test_request), + ) + test_output = await request_func(request_func_input=test_input) + if not test_output.success: + raise ValueError( + "Initial test run failed - Please make sure benchmark arguments " + f"are correctly specified. Error: {test_output.error}") + else: + print("Initial test run completed. Starting main benchmark run...") + + if profile: + print("Starting profiler...") + profile_input = RequestFuncInput( + model=model_id, + prompt=test_request.prompt, + api_url=base_url + "/start_profile", + prompt_len=test_request.prompt_len, + output_len=test_request.expected_output_len, + ignore_eos=ignore_eos, + extra_body=prepare_extra_body(test_request), + ) + profile_output = await request_func(request_func_input=profile_input) + if profile_output.success: + print("Profiler started") + + if burstiness == 1.0: + distribution = "Poisson process" + else: + distribution = "Gamma distribution" + + print(f"Traffic request rate: {request_rate}") + print(f"Burstiness factor: {burstiness} ({distribution})") + print(f"Maximum request concurrency: {max_concurrency}") + + pbar = None if disable_tqdm else tqdm(total=len(input_requests)) + + # This can be used once the minimum Python version is 3.10 or higher, + # and it will simplify the code in limited_request_func. 
+ # semaphore = (asyncio.Semaphore(max_concurrency) + # if max_concurrency else contextlib.nullcontext()) + semaphore = (asyncio.Semaphore(max_concurrency) + if max_concurrency else None) + + async def limited_request_func(request_func_input, pbar): + if semaphore is None: + return await request_func(request_func_input=request_func_input, + pbar=pbar) + async with semaphore: + return await request_func(request_func_input=request_func_input, + pbar=pbar) + + benchmark_start_time = time.perf_counter() + tasks: List[asyncio.Task] = [] + expected: List[str] = [] + async for i, request in get_request(input_requests, request_rate, + burstiness): + extra_body = prepare_extra_body( + request) if i in guided_decoding_req_idx else None + request_func_input = RequestFuncInput( + model=model_id, + prompt=request.prompt, + api_url=api_url, + prompt_len=request.prompt_len, + output_len=request.expected_output_len, + ignore_eos=ignore_eos, + extra_body=extra_body, + ) + expected.append(request.completion) + tasks.append( + asyncio.create_task( + limited_request_func(request_func_input=request_func_input, + pbar=pbar))) + outputs: List[RequestFuncOutput] = await asyncio.gather(*tasks) + + if profile: + print("Stopping profiler...") + profile_input = RequestFuncInput( + model=model_id, + prompt=test_request.prompt, + api_url=base_url + "/stop_profile", + prompt_len=test_request.prompt_len, + output_len=test_request.expected_output_len, + extra_body={test_request.structure_type: test_request.schema}, + ) + profile_output = await request_func(request_func_input=profile_input) + if profile_output.success: + print("Profiler stopped") + + if pbar is not None: + pbar.close() + + benchmark_duration = time.perf_counter() - benchmark_start_time + + metrics, actual_output_lens = calculate_metrics( + input_requests=input_requests, + outputs=outputs, + dur_s=benchmark_duration, + tokenizer=tokenizer, + selected_percentile_metrics=selected_percentile_metrics, + selected_percentiles=selected_percentiles, + ) + + print("{s:{c}^{n}}".format(s=' Serving Benchmark Result ', n=50, c='=')) + print("{:<40} {:<10}".format("Successful requests:", metrics.completed)) + print("{:<40} {:<10.2f}".format("Benchmark duration (s):", + benchmark_duration)) + print("{:<40} {:<10}".format("Total input tokens:", metrics.total_input)) + print("{:<40} {:<10}".format("Total generated tokens:", + metrics.total_output)) + print("{:<40} {:<10.2f}".format("Request throughput (req/s):", + metrics.request_throughput)) + print("{:<40} {:<10.2f}".format("Output token throughput (tok/s):", + metrics.output_throughput)) + print("{:<40} {:<10.2f}".format("Total Token throughput (tok/s):", + metrics.total_token_throughput)) + + result = { + "duration": + benchmark_duration, + "completed": + metrics.completed, + "total_input_tokens": + metrics.total_input, + "total_output_tokens": + metrics.total_output, + "request_throughput": + metrics.request_throughput, + "output_throughput": + metrics.output_throughput, + "total_token_throughput": + metrics.total_token_throughput, + "ttft_description": + pd.Series([output.ttft for output in outputs]).describe().to_dict(), + "tpot_description": + pd.Series([output.tpot for output in outputs]).describe().to_dict(), + "input_lens": [output.prompt_len for output in outputs], + "output_lens": + actual_output_lens, + "ttfts": [output.ttft for output in outputs], + "itls": [output.itl for output in outputs], + "errors": [output.error for output in outputs], + } + + ret = [{ + 'generated': output.generated_text, + 
'expected': gt + } for output, gt in zip(outputs, expected)] + + def process_one_metric( + # E.g., "ttft" + metric_attribute_name: str, + # E.g., "TTFT" + metric_name: str, + # E.g., "Time to First Token" + metric_header: str, + ): + # This function prints and adds statistics of the specified + # metric. + if metric_attribute_name not in selected_percentile_metrics: + return + print("{s:{c}^{n}}".format(s=metric_header, n=50, c='-')) + print("{:<40} {:<10.2f}".format( + f"Mean {metric_name} (ms):", + getattr(metrics, f"mean_{metric_attribute_name}_ms"))) + print("{:<40} {:<10.2f}".format( + f"Median {metric_name} (ms):", + getattr(metrics, f"median_{metric_attribute_name}_ms"))) + result[f"mean_{metric_attribute_name}_ms"] = getattr( + metrics, f"mean_{metric_attribute_name}_ms") + result[f"median_{metric_attribute_name}_ms"] = getattr( + metrics, f"median_{metric_attribute_name}_ms") + result[f"std_{metric_attribute_name}_ms"] = getattr( + metrics, f"std_{metric_attribute_name}_ms") + for p, value in getattr(metrics, + f"percentiles_{metric_attribute_name}_ms"): + p_word = str(int(p)) if int(p) == p else str(p) + print("{:<40} {:<10.2f}".format(f"P{p_word} {metric_name} (ms):", + value)) + result[f"p{p_word}_{metric_attribute_name}_ms"] = value + + process_one_metric("ttft", "TTFT", "Time to First Token") + process_one_metric("tpot", "TPOT", + "Time per Output Token (excl. 1st token)") + process_one_metric("itl", "ITL", "Inter-token Latency") + process_one_metric("e2el", "E2EL", "End-to-end Latency") + + print("=" * 50) + + return result, ret + + +def evaluate(ret, args): + + def _eval_correctness_json(expected, actual): + # extract json string from string using regex + import re + actual = actual.replace('\n', '').replace(' ', '').strip() + try: + actual = re.search(r'\{.*\}', actual).group() + actual = json.loads(actual) + except Exception: + return False + + return True + + def _eval_correctness_choice(expected, actual): + return actual in args.choice + + def _eval_correctness_regex(expected, actual): + import re + return re.match(args.regex, actual) is not None + + def _eval_correctness(expected, actual): + if args.structure_type == 'guided_json': + return _eval_correctness_json(expected, actual) + elif args.structure_type == 'guided_regex': + return _eval_correctness_regex(expected, actual) + elif args.structure_type == 'guided_choice': + return _eval_correctness_choice(expected, actual) + else: + return None + + scores = [] + for res in ret: + score = _eval_correctness(res['expected'], res['generated']) + res['correctness'] = score + scores.append(score) + + not_none_scores = [score for score in scores if score is not None] + + return (sum(not_none_scores) / len(not_none_scores) * + 100) if len(not_none_scores) > 0 else None + + +def main(args: argparse.Namespace): + print(args) + random.seed(args.seed) + np.random.seed(args.seed) + + backend = args.backend + model_id = args.model + tokenizer_id = args.tokenizer if args.tokenizer is not None else args.model + + if args.base_url is not None: + api_url = f"{args.base_url}{args.endpoint}" + base_url = f"{args.base_url}" + else: + api_url = f"http://{args.host}:{args.port}{args.endpoint}" + base_url = f"http://{args.host}:{args.port}" + + tokenizer = get_tokenizer(tokenizer_id, + trust_remote_code=args.trust_remote_code) + + if args.dataset == 'grammar': + args.structure_type = 'guided_grammar' + elif args.dataset == 'regex': + args.structure_type = 'guided_regex' + elif args.dataset == 'choice': + args.structure_type = 'guided_choice' 
+ else: + args.structure_type = 'guided_json' + + if args.no_guided_decoding: + args.guided_decoding_ratio = 0 + if args.save_results: + result_file_name = f'{args.guided_decoding_ratio}guided' + result_file_name += f"_{backend}" + result_file_name += f"_{args.request_rate}qps" + result_file_name += f"_{args.model.split('/')[-1]}" + result_file_name += f"_{args.dataset}" + result_file_name += f"_{args.num_prompts}" + result_file_name += f"_out{args.output_len}" + result_file_name += ".txt" + else: + result_file_name = None + + input_requests = sample_requests(tokenizer, args) + + benchmark_result, ret = asyncio.run( + benchmark( + backend=backend, + api_url=api_url, + base_url=base_url, + model_id=model_id, + tokenizer=tokenizer, + input_requests=input_requests, + request_rate=args.request_rate, + burstiness=args.burstiness, + disable_tqdm=args.disable_tqdm, + profile=args.profile, + selected_percentile_metrics=args.percentile_metrics.split(","), + selected_percentiles=[ + float(p) for p in args.metric_percentiles.split(",") + ], + ignore_eos=args.ignore_eos, + max_concurrency=args.max_concurrency, + guided_decoding_ratio=args.guided_decoding_ratio, + guided_decoding_backend=args.guided_decoding_backend, + )) + + # Save config and results to json + score = evaluate(ret, args) + print("correct_rate(%)", score, '\n') + if args.save_results: + results = { + "backend": + backend, + "model_id": + model_id, + "tokenizer_id": + tokenizer_id, + "num_prompts": + args.num_prompts, + "request_rate": + args.request_rate if args.request_rate < float("inf") else "inf", + "burstiness": + args.burstiness, + "max_concurrency": + args.max_concurrency, + "correct_rate(%)": + score + } + results = {"outputs": ret, **results, **benchmark_result} + + # Save to file + if args.result_filename: + result_file_name = args.result_filename + if args.result_dir: + result_file_name = os.path.join(args.result_dir, result_file_name) + with open(result_file_name, "w", encoding='utf-8') as outfile: + json.dump(results, outfile, indent=4) + + +if __name__ == "__main__": + parser = FlexibleArgumentParser( + description="Benchmark the online serving throughput.") + parser.add_argument( + "--backend", + type=str, + default="vllm", + choices=list(ASYNC_REQUEST_FUNCS.keys()), + ) + parser.add_argument( + "--base-url", + type=str, + default=None, + help="Server or API base url if not using http host and port.", + ) + parser.add_argument("--host", type=str, default="localhost") + parser.add_argument("--port", type=int, default=8000) + parser.add_argument( + "--endpoint", + type=str, + default="/v1/completions", + help="API endpoint.", + ) + parser.add_argument( + "--dataset", + default='json', + choices=['json', 'grammar', 'regex', 'choice', 'xgrammar_bench']) + parser.add_argument("--json_schema_path", + type=str, + default=None, + help="Path to json schema.") + parser.add_argument( + "--max-concurrency", + type=int, + default=None, + help="Maximum number of concurrent requests. This can be used " + "to help simulate an environment where a higher level component " + "is enforcing a maximum number of concurrent requests. While the " + "--request-rate argument controls the rate at which requests are " + "initiated, this argument will control how many are actually allowed " + "to execute at a time. 
This means that when used in combination, the "
+        "actual request rate may be lower than specified with --request-rate, "
+        "if the server is not processing requests fast enough to keep up.")
+    parser.add_argument(
+        "--model",
+        type=str,
+        required=True,
+        help="Name of the model.",
+    )
+    parser.add_argument(
+        "--tokenizer",
+        type=str,
+        help=
+        "Name or path of the tokenizer, if not using the default tokenizer.",  # noqa: E501
+    )
+    parser.add_argument(
+        "--num-prompts",
+        type=int,
+        default=1000,
+        help="Number of prompts to process.",
+    )
+    parser.add_argument(
+        "--output-len",
+        type=int,
+        default=128,
+        help="Number of output tokens.",
+    )
+    parser.add_argument(
+        "--request-rate",
+        type=float,
+        default=float("inf"),
+        help="Number of requests per second. If this is inf, "
+        "then all the requests are sent at time 0. "
+        "Otherwise, we use a Poisson process or gamma distribution "
+        "to synthesize the request arrival times.",
+    )
+    parser.add_argument(
+        "--burstiness",
+        type=float,
+        default=1.0,
+        help="Burstiness factor of the request generation. "
+        "Only takes effect when request_rate is not inf. "
+        "Default value is 1, which follows a Poisson process. "
+        "Otherwise, the request intervals follow a gamma distribution. "
+        "A lower burstiness value (0 < burstiness < 1) results in more "
+        "bursty requests. A higher burstiness value (burstiness > 1) "
+        "results in a more uniform arrival of requests.",
+    )
+    parser.add_argument("--seed", type=int, default=0)
+    parser.add_argument(
+        "--trust-remote-code",
+        action="store_true",
+        help="Trust remote code from huggingface",
+    )
+    parser.add_argument(
+        "--disable-tqdm",
+        action="store_true",
+        help="Specify to disable tqdm progress bar.",
+    )
+    parser.add_argument(
+        "--save-results",
+        action="store_true",
+        help="Specify to save benchmark results to a json file",
+    )
+    parser.add_argument(
+        "--profile",
+        action="store_true",
+        help="Use Torch Profiler. The endpoint must be launched with "
+        "VLLM_TORCH_PROFILER_DIR to enable profiler.",
+    )
+    parser.add_argument(
+        "--result-dir",
+        type=str,
+        default=None,
+        help="Specify directory to save benchmark json results. "
+        "If not specified, results are saved in the current directory.",
+    )
+    parser.add_argument(
+        "--result-filename",
+        type=str,
+        default=None,
+        help="Specify the filename to save benchmark json results. "
+        "If not specified, results will be saved in "
+        "{backend}-{args.request_rate}qps-{base_model_id}-{current_dt}.json"
+        " format.",
+    )
+    parser.add_argument(
+        "--ignore-eos",
+        action="store_true",
+        help="Set ignore_eos flag when sending the benchmark request. "
+        "Warning: ignore_eos is not supported in deepspeed_mii and tgi.")
+    parser.add_argument(
+        "--percentile-metrics",
+        type=str,
+        default="ttft,tpot,itl",
+        help="Comma-separated list of selected metrics to report percentiles. "
+        "This argument specifies the metrics to report percentiles. "
+        "Allowed metric names are \"ttft\", \"tpot\", \"itl\", \"e2el\". "
+        "Default value is \"ttft,tpot,itl\".")
+    parser.add_argument(
+        "--metric-percentiles",
+        type=str,
+        default="99",
+        help="Comma-separated list of percentiles for selected metrics. "
+        "To report 25th, 50th, and 75th percentiles, use \"25,50,75\". "
+        "Default value is \"99\". "
+        "Use \"--percentile-metrics\" to select metrics.",
+    )
+    parser.add_argument("--no-guided-decoding",
+                        action='store_true',
+                        default=False,
+                        help="Whether to disable guided decoding or not.")
+    parser.add_argument("--guided-decoding-ratio",
+                        type=float,
+                        default=1.0,
+                        help="Ratio of guided decoding requests.")
+    parser.add_argument("--guided-decoding-backend",
+                        type=str,
+                        choices=["outlines", "lm-format-enforcer", "xgrammar"],
+                        default="xgrammar",
+                        help="Backend to use for guided decoding")
+
+    args = parser.parse_args()
+    main(args)
diff --git a/benchmarks/benchmark_throughput.py b/benchmarks/benchmark_throughput.py
index 159cf055737ce..1e5967bd9bf8b 100644
--- a/benchmarks/benchmark_throughput.py
+++ b/benchmarks/benchmark_throughput.py
@@ -294,23 +294,36 @@ def main(args: argparse.Namespace):
     tokenizer = AutoTokenizer.from_pretrained(
         args.tokenizer, trust_remote_code=args.trust_remote_code)
     if args.dataset is None:
-        # Synthesize a prompt with the given input length.
-        # As tokenizer may add additional tokens like BOS, we need to try
-        # different lengths to get the desired input length.
-        for i in range(-10, 10):
-            prompt = "hi " * (args.input_len + i)
-            tokenized_prompt = tokenizer(prompt).input_ids
-            if len(tokenized_prompt) == args.input_len:
-                break
-        else:
-            raise ValueError(
-                f"Failed to synthesize a prompt with {args.input_len} tokens.")
-        requests = [
-            SampleRequest(prompt=prompt,
-                          prompt_len=args.input_len,
-                          expected_output_len=args.output_len)
-            for _ in range(args.num_prompts)
-        ]
+        vocab_size = tokenizer.vocab_size
+        requests = []
+        for _ in range(args.num_prompts):
+            # Synthesize a prompt with the given input length.
+            candidate_ids = [
+                random.randint(0, vocab_size - 1)
+                for _ in range(args.input_len)
+            ]
+            # As tokenizer may add additional tokens like BOS, we need to try
+            # different lengths to get the desired input length.
+            for _ in range(5):  # Max attempts to correct
+                candidate_prompt = tokenizer.decode(candidate_ids)
+                tokenized_len = len(tokenizer.encode(candidate_prompt))
+
+                if tokenized_len == args.input_len:
+                    break
+
+                # Adjust length based on difference
+                diff = args.input_len - tokenized_len
+                if diff > 0:
+                    candidate_ids.extend([
+                        random.randint(100, vocab_size - 100)
+                        for _ in range(diff)
+                    ])
+                else:
+                    candidate_ids = candidate_ids[:diff]
+            requests.append(
+                SampleRequest(prompt=candidate_prompt,
+                              prompt_len=args.input_len,
+                              expected_output_len=args.output_len))
     else:
         requests = sample_requests(tokenizer, args)
 
diff --git a/benchmarks/disagg_benchmarks/disagg_overhead_benchmark.sh b/benchmarks/disagg_benchmarks/disagg_overhead_benchmark.sh
new file mode 100644
index 0000000000000..2924ea4a49f54
--- /dev/null
+++ b/benchmarks/disagg_benchmarks/disagg_overhead_benchmark.sh
@@ -0,0 +1,144 @@
+#!/bin/bash

+# benchmark the overhead of disaggregated prefill.
+# methodology:
+# - send all requests to the prefill vLLM instance; it will buffer the KV cache.
+# - then send all requests to the decode instance.
+# - The TTFT of the decode instance is the overhead.
+
+set -ex
+
+kill_gpu_processes() {
+  # kill all processes on GPU.
+  pkill -f pt_main_thread
+  sleep 10
+
+  # remove vllm config file
+  rm -rf ~/.config/vllm
+
+  # Print the GPU memory usage
+  # so that we know if all GPU processes are killed.
+  gpu_memory_usage=$(nvidia-smi --query-gpu=memory.used --format=csv,noheader,nounits -i 0)
+  # The memory usage should be 0 MB.
+ echo "GPU 0 Memory Usage: $gpu_memory_usage MB" +} + +wait_for_server() { + # wait for vllm server to start + # return 1 if vllm server crashes + local port=$1 + timeout 1200 bash -c " + until curl -s localhost:${port}/v1/completions > /dev/null; do + sleep 1 + done" && return 0 || return 1 +} + + +benchmark() { + + export VLLM_LOGGING_LEVEL=DEBUG + export VLLM_HOST_IP=$(hostname -I | awk '{print $1}') + + # compare chunked prefill with disaggregated prefill + + results_folder="./results" + model="meta-llama/Meta-Llama-3.1-8B-Instruct" + dataset_name="sonnet" + dataset_path="../sonnet_4x.txt" + num_prompts=10 + qps=$1 + prefix_len=50 + input_len=2048 + output_len=$2 + + + CUDA_VISIBLE_DEVICES=0 python3 \ + -m vllm.entrypoints.openai.api_server \ + --model meta-llama/Meta-Llama-3.1-8B-Instruct \ + --port 8100 \ + --max-model-len 10000 \ + --gpu-memory-utilization 0.6 \ + --kv-transfer-config \ + '{"kv_connector":"PyNcclConnector","kv_role":"kv_producer","kv_rank":0,"kv_parallel_size":2,"kv_buffer_size":5e9}' & + + + CUDA_VISIBLE_DEVICES=1 python3 \ + -m vllm.entrypoints.openai.api_server \ + --model meta-llama/Meta-Llama-3.1-8B-Instruct \ + --port 8200 \ + --max-model-len 10000 \ + --gpu-memory-utilization 0.6 \ + --kv-transfer-config \ + '{"kv_connector":"PyNcclConnector","kv_role":"kv_consumer","kv_rank":1,"kv_parallel_size":2,"kv_buffer_size":5e9}' & + + wait_for_server 8100 + wait_for_server 8200 + + # let the prefill instance finish prefill + python3 ../benchmark_serving.py \ + --backend vllm \ + --model $model \ + --dataset-name $dataset_name \ + --dataset-path $dataset_path \ + --sonnet-input-len $input_len \ + --sonnet-output-len "$output_len" \ + --sonnet-prefix-len $prefix_len \ + --num-prompts $num_prompts \ + --port 8100 \ + --save-result \ + --result-dir $results_folder \ + --result-filename disagg_prefill_2xtp4.json \ + --request-rate "inf" + + + # send the request to decode. + # The TTFT of this command will be the overhead of disagg prefill impl. + python3 ../benchmark_serving.py \ + --backend vllm \ + --model $model \ + --dataset-name $dataset_name \ + --dataset-path $dataset_path \ + --sonnet-input-len $input_len \ + --sonnet-output-len "$output_len" \ + --sonnet-prefix-len $prefix_len \ + --num-prompts $num_prompts \ + --port 8200 \ + --save-result \ + --result-dir $results_folder \ + --result-filename disagg_prefill_2xtp4.json \ + --request-rate "$qps" + kill_gpu_processes + +} + + +main() { + + (which wget && which curl) || (apt-get update && apt-get install -y wget curl) + (which jq) || (apt-get -y install jq) + (which socat) || (apt-get -y install socat) + + pip install quart httpx + + cd "$(dirname "$0")" + + cd .. + # create sonnet-4x.txt + echo "" > sonnet_4x.txt + for _ in {1..4} + do + cat sonnet.txt >> sonnet_4x.txt + done + cd disagg_benchmarks + + rm -rf results + mkdir results + + default_qps=1 + default_output_len=1 + benchmark $default_qps $default_output_len + +} + + +main "$@" diff --git a/benchmarks/disagg_benchmarks/disagg_performance_benchmark.sh b/benchmarks/disagg_benchmarks/disagg_performance_benchmark.sh new file mode 100644 index 0000000000000..d8d9e976dce76 --- /dev/null +++ b/benchmarks/disagg_benchmarks/disagg_performance_benchmark.sh @@ -0,0 +1,164 @@ +#!/bin/bash + +# Requirement: 8x H100 GPUs. + + +# Model: neuralmagic/Meta-Llama-3-70B-Instruct-FP8-KV +# Query: 2048 input tokens, 11 output tokens, QPS 4, 500 requests +# Resource: 8x H100 +# Approaches: +# 1. Chunked prefill: 1 vllm instance with tp=8 +# 2. 
Chunked prefill: 2 vLLM instances with tp=4, equivalent to 1 tp=4 instance with QPS 4
+# 3. Disaggregated prefill: 1 prefilling instance and 1 decoding instance
+# Prefilling instance: max_output_token=1
+# Decoding instance: force the input tokens to be the same across requests to bypass prefilling
+
+set -ex
+
+kill_gpu_processes() {
+  # kill all processes on GPU.
+  pgrep pt_main_thread | xargs -r kill -9
+  pgrep python3 | xargs -r kill -9
+  for port in 8000 8100 8200; do lsof -t -i:$port | xargs -r kill -9; done
+  sleep 1
+}
+
+wait_for_server() {
+  # wait for vllm server to start
+  # return 1 if vllm server crashes
+  local port=$1
+  timeout 1200 bash -c "
+    until curl -s localhost:${port}/v1/completions > /dev/null; do
+      sleep 1
+    done" && return 0 || return 1
+}
+
+
+launch_chunked_prefill() {
+  model="meta-llama/Meta-Llama-3.1-8B-Instruct"
+  # chunked prefill
+  CUDA_VISIBLE_DEVICES=0 python3 \
+    -m vllm.entrypoints.openai.api_server \
+    --model $model \
+    --port 8100 \
+    --max-model-len 10000 \
+    --enable-chunked-prefill \
+    --gpu-memory-utilization 0.6 &
+  CUDA_VISIBLE_DEVICES=1 python3 \
+    -m vllm.entrypoints.openai.api_server \
+    --model $model \
+    --port 8200 \
+    --max-model-len 10000 \
+    --enable-chunked-prefill \
+    --gpu-memory-utilization 0.6 &
+  wait_for_server 8100
+  wait_for_server 8200
+  python3 round_robin_proxy.py &
+  sleep 1
+}
+
+
+launch_disagg_prefill() {
+  model="meta-llama/Meta-Llama-3.1-8B-Instruct"
+  # disagg prefill
+  CUDA_VISIBLE_DEVICES=0 python3 \
+    -m vllm.entrypoints.openai.api_server \
+    --model $model \
+    --port 8100 \
+    --max-model-len 10000 \
+    --gpu-memory-utilization 0.6 \
+    --kv-transfer-config \
+    '{"kv_connector":"PyNcclConnector","kv_role":"kv_producer","kv_rank":0,"kv_parallel_size":2,"kv_buffer_size":5e9}' &
+
+  CUDA_VISIBLE_DEVICES=1 python3 \
+    -m vllm.entrypoints.openai.api_server \
+    --model $model \
+    --port 8200 \
+    --max-model-len 10000 \
+    --gpu-memory-utilization 0.6 \
+    --kv-transfer-config \
+    '{"kv_connector":"PyNcclConnector","kv_role":"kv_consumer","kv_rank":1,"kv_parallel_size":2,"kv_buffer_size":5e9}' &
+
+  wait_for_server 8100
+  wait_for_server 8200
+  python3 disagg_prefill_proxy_server.py &
+  sleep 1
+}
+
+
+benchmark() {
+  results_folder="./results"
+  model="meta-llama/Meta-Llama-3.1-8B-Instruct"
+  dataset_name="sonnet"
+  dataset_path="../sonnet_4x.txt"
+  num_prompts=100
+  qps=$1
+  prefix_len=50
+  input_len=1024
+  output_len=$2
+  tag=$3
+
+  python3 ../benchmark_serving.py \
+    --backend vllm \
+    --model $model \
+    --dataset-name $dataset_name \
+    --dataset-path $dataset_path \
+    --sonnet-input-len $input_len \
+    --sonnet-output-len "$output_len" \
+    --sonnet-prefix-len $prefix_len \
+    --num-prompts $num_prompts \
+    --port 8000 \
+    --save-result \
+    --result-dir $results_folder \
+    --result-filename "$tag"-qps-"$qps".json \
+    --request-rate "$qps"
+
+  sleep 2
+
+}
+
+
+main() {
+
+  (which wget && which curl) || (apt-get update && apt-get install -y wget curl)
+  (which jq) || (apt-get -y install jq)
+  (which socat) || (apt-get -y install socat)
+
+  pip install quart httpx matplotlib aiohttp
+
+  cd "$(dirname "$0")"
+
+  cd ..
+ # create sonnet-4x.txt so that we can sample 2048 tokens for input + echo "" > sonnet_4x.txt + for _ in {1..4} + do + cat sonnet.txt >> sonnet_4x.txt + done + cd disagg_benchmarks + + rm -rf results + mkdir results + + default_output_len=6 + + export VLLM_HOST_IP=$(hostname -I | awk '{print $1}') + + launch_chunked_prefill + for qps in 2 4 6 8; do + benchmark $qps $default_output_len chunked_prefill + done + kill_gpu_processes + + launch_disagg_prefill + for qps in 2 4 6 8; do + benchmark $qps $default_output_len disagg_prefill + done + kill_gpu_processes + + python3 visualize_benchmark_results.py + +} + + +main "$@" diff --git a/benchmarks/disagg_benchmarks/disagg_prefill_proxy_server.py b/benchmarks/disagg_benchmarks/disagg_prefill_proxy_server.py new file mode 100644 index 0000000000000..4058b1c0a3b79 --- /dev/null +++ b/benchmarks/disagg_benchmarks/disagg_prefill_proxy_server.py @@ -0,0 +1,61 @@ +import os + +import aiohttp +from quart import Quart, make_response, request + +AIOHTTP_TIMEOUT = aiohttp.ClientTimeout(total=6 * 60 * 60) + +app = Quart(__name__) + + +async def forward_request(url, data): + async with aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session: + headers = { + "Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}" + } + async with session.post(url=url, json=data, + headers=headers) as response: + if response.status == 200: + # if response.headers.get('Transfer-Encoding') == 'chunked': + if True: + async for chunk_bytes in response.content.iter_chunked( + 1024): + yield chunk_bytes + else: + content = await response.read() + yield content + + +@app.route('/v1/completions', methods=['POST']) +async def handle_request(): + try: + original_request_data = await request.get_json() + + prefill_request = original_request_data.copy() + # change max_tokens = 1 to let it only do prefill + prefill_request['max_tokens'] = 1 + + # finish prefill + async for _ in forward_request('http://localhost:8100/v1/completions', + prefill_request): + continue + + # return decode + generator = forward_request('http://localhost:8200/v1/completions', + original_request_data) + response = await make_response(generator) + response.timeout = None + + return response + + except Exception as e: + import sys + import traceback + exc_info = sys.exc_info() + print("Error occurred in disagg prefill proxy server") + print(e) + print("".join(traceback.format_exception(*exc_info))) + + +if __name__ == '__main__': + app.run(port=8000) diff --git a/benchmarks/disagg_benchmarks/round_robin_proxy.py b/benchmarks/disagg_benchmarks/round_robin_proxy.py new file mode 100644 index 0000000000000..6eb5f63980070 --- /dev/null +++ b/benchmarks/disagg_benchmarks/round_robin_proxy.py @@ -0,0 +1,60 @@ +import asyncio +import itertools + +import aiohttp +from aiohttp import web + + +class RoundRobinProxy: + + def __init__(self, target_ports): + self.target_ports = target_ports + self.port_cycle = itertools.cycle(self.target_ports) + + async def handle_request(self, request): + target_port = next(self.port_cycle) + target_url = f"http://localhost:{target_port}{request.path_qs}" + + async with aiohttp.ClientSession() as session: + try: + # Forward the request + async with session.request( + method=request.method, + url=target_url, + headers=request.headers, + data=request.content, + ) as response: + # Start sending the response + resp = web.StreamResponse(status=response.status, + headers=response.headers) + await resp.prepare(request) + + # Stream the response content + async for chunk in 
response.content.iter_any(): + await resp.write(chunk) + + await resp.write_eof() + return resp + + except Exception as e: + return web.Response(text=f"Error: {str(e)}", status=500) + + +async def main(): + proxy = RoundRobinProxy([8100, 8200]) + app = web.Application() + app.router.add_route('*', '/{path:.*}', proxy.handle_request) + + runner = web.AppRunner(app) + await runner.setup() + site = web.TCPSite(runner, 'localhost', 8000) + await site.start() + + print("Proxy server started on http://localhost:8000") + + # Keep the server running + await asyncio.Event().wait() + + +if __name__ == '__main__': + asyncio.run(main()) diff --git a/benchmarks/disagg_benchmarks/visualize_benchmark_results.py b/benchmarks/disagg_benchmarks/visualize_benchmark_results.py new file mode 100644 index 0000000000000..e59d8bb0e6c8c --- /dev/null +++ b/benchmarks/disagg_benchmarks/visualize_benchmark_results.py @@ -0,0 +1,46 @@ +import json + +import matplotlib.pyplot as plt +import pandas as pd + +if __name__ == "__main__": + + data = [] + for name in ['disagg_prefill', 'chunked_prefill']: + for qps in [2, 4, 6, 8]: + with open(f"results/{name}-qps-{qps}.json") as f: + x = json.load(f) + x['name'] = name + x['qps'] = qps + data.append(x) + + df = pd.DataFrame.from_dict(data) + dis_df = df[df['name'] == 'disagg_prefill'] + chu_df = df[df['name'] == 'chunked_prefill'] + + plt.style.use('bmh') + plt.rcParams['font.size'] = 20 + + for key in [ + 'mean_ttft_ms', 'median_ttft_ms', 'p99_ttft_ms', 'mean_itl_ms', + 'median_itl_ms', 'p99_itl_ms' + ]: + + fig, ax = plt.subplots(figsize=(11, 7)) + plt.plot(dis_df['qps'], + dis_df[key], + label='disagg_prefill', + marker='o', + linewidth=4) + plt.plot(chu_df['qps'], + chu_df[key], + label='chunked_prefill', + marker='o', + linewidth=4) + ax.legend() + + ax.set_xlabel('QPS') + ax.set_ylabel(key) + ax.set_ylim(bottom=0) + fig.savefig(f'results/{key}.png') + plt.close(fig) diff --git a/benchmarks/fused_kernels/layernorm_rms_benchmarks.py b/benchmarks/fused_kernels/layernorm_rms_benchmarks.py new file mode 100644 index 0000000000000..ef91f9f8eb529 --- /dev/null +++ b/benchmarks/fused_kernels/layernorm_rms_benchmarks.py @@ -0,0 +1,173 @@ +import pickle as pkl +import time +from dataclasses import dataclass +from itertools import product +from typing import Callable, Iterable, List, Optional + +import torch +import torch.utils.benchmark as TBenchmark +from torch.utils.benchmark import Measurement as TMeasurement +from tqdm import tqdm + +import vllm._custom_ops as ops +from vllm.model_executor.layers.layernorm import RMSNorm + + +@dataclass +class bench_params_t: + num_tokens: int + hidden_size: int + add_residual: bool + dtype: torch.dtype + + def description(self): + return (f'N {self.num_tokens} ' + f'x D {self.hidden_size} ' + f'x R {self.add_residual} ' + f'x DT {self.dtype}') + + +def get_bench_params() -> List[bench_params_t]: + ## Test Fixtures + NUM_TOKENS = [2**x for x in range(11)] + HIDDEN_SIZES = list(range(1024, 8129, 1024)) + ADD_RESIDUAL = [True, False] + DTYPES = [torch.bfloat16, torch.float] + + combinations = product(NUM_TOKENS, HIDDEN_SIZES, ADD_RESIDUAL, DTYPES) + bench_params = list(map(lambda x: \ + bench_params_t(x[0], x[1], x[2], x[3]), combinations)) + return bench_params + + +# Reference impls +def unfused_int8_impl(rms_norm_layer: RMSNorm, x: torch.Tensor, + residual: Optional[torch.Tensor], + quant_dtype: torch.dtype): + # Norm + torch_out = None + if residual is None: + torch_out = rms_norm_layer.forward_cuda(x, residual) + else: + torch_out, _ = 
rms_norm_layer.forward_cuda(x, residual) + + # Quant + torch_out, _, _ = ops.scaled_int8_quant(torch_out) + + +def unfused_fp8_impl(rms_norm_layer: RMSNorm, x: torch.Tensor, + residual: Optional[torch.Tensor], + quant_dtype: torch.dtype): + # Norm + torch_out = None + if residual is None: + torch_out = rms_norm_layer.forward_cuda(x, residual) + else: + torch_out, _ = rms_norm_layer.forward_cuda(x, residual) + + # Quant + torch_out, _ = ops.scaled_fp8_quant(torch_out) + + +def fused_impl( + rms_norm_layer: RMSNorm, # this stores the weights + x: torch.Tensor, + residual: Optional[torch.Tensor], + quant_dtype: torch.dtype): + out, _ = ops.rms_norm_dynamic_per_token_quant(x, + rms_norm_layer.weight, + 1e-6, + quant_dtype, + residual=residual) + + +# Bench functions +def bench_fn(rms_norm_layer: RMSNorm, x: torch.Tensor, residual: torch.Tensor, + quant_dtype: torch.dtype, label: str, sub_label: str, + fn: Callable, description: str) -> TMeasurement: + + min_run_time = 1 + + globals = { + "rms_norm_layer": rms_norm_layer, + "x": x, + "residual": residual, + "quant_dtype": quant_dtype, + "fn": fn, + } + return TBenchmark.Timer( + stmt="fn(rms_norm_layer, x, residual, quant_dtype)", + globals=globals, + label=label, + sub_label=sub_label, + description=description, + ).blocked_autorange(min_run_time=min_run_time) + +def bench(params: bench_params_t, label: str, sub_label: str) \ + -> Iterable[TMeasurement]: + + # Make inputs + layer = RMSNorm(params.hidden_size, 1e-6).to(dtype=params.dtype) + # Make weights + layer.weight.data.normal_(mean=1.0, std=0.1) + # Make inputs + scale = 1 / params.hidden_size + x = torch.randn(params.num_tokens, + params.hidden_size, + dtype=params.dtype, + device='cuda') * scale + residual = (torch.randn_like(x) * scale).to(device='cuda') \ + if params.add_residual else None + + timers = [] + + # unfused int8 impl. + timers.append( + bench_fn(layer, x, residual, torch.int8, label, sub_label, + unfused_int8_impl, "unfused_int8_impl")) + + # unfused fp8 impl. + timers.append( + bench_fn(layer, x, residual, torch.float8_e4m3fn, label, sub_label, + unfused_fp8_impl, "unfused_fp8_impl")) + + # fused int8 impl. + timers.append( + bench_fn(layer, x, residual, torch.int8, label, sub_label, fused_impl, + "fused_int8_impl")) + + # fused fp8 impl. 
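+  # (note) no separate fused fp8 wrapper is needed: fused_impl forwards
+  # quant_dtype to ops.rms_norm_dynamic_per_token_quant, which selects the
+  # kernel's output type, so the same callable covers int8 and fp8.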
+ timers.append( + bench_fn(layer, x, residual, torch.float8_e4m3fn, label, sub_label, + fused_impl, "fused_fp8_impl")) + + print_timers(timers) + + return timers + + +# launch bench +# runner +def print_timers(timers: Iterable[TMeasurement]): + compare = TBenchmark.Compare(timers) + compare.print() + + +def main(): + torch.set_default_device('cuda') + bench_params = get_bench_params() + + timers = [] + for bp in tqdm(bench_params): + timers.extend( + bench(bp, "rms-norm-dynamic-per-token-quant", bp.description())) + print_timers(timers) + + # pickle all the results + timestamp = int(time.time()) + with open(f"rms_norm_dpt_quant-{timestamp}.pkl", "wb") as f: + pkl.dump(timers, f) + + +if __name__ == '__main__': + main() diff --git a/benchmarks/kernels/benchmark_rmsnorm.py b/benchmarks/kernels/benchmark_rmsnorm.py new file mode 100644 index 0000000000000..baa5de0fff1bd --- /dev/null +++ b/benchmarks/kernels/benchmark_rmsnorm.py @@ -0,0 +1,262 @@ +import itertools +from typing import Optional, Tuple, Union + +import torch +import triton +from flashinfer.norm import fused_add_rmsnorm, rmsnorm +from torch import nn + +from vllm import _custom_ops as vllm_ops + + +class HuggingFaceRMSNorm(nn.Module): + + def __init__(self, hidden_size: int, eps: float = 1e-6) -> None: + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward( + self, + x: torch.Tensor, + residual: Optional[torch.Tensor] = None, + ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: + orig_dtype = x.dtype + x = x.to(torch.float32) + if residual is not None: + x = x + residual.to(torch.float32) + residual = x.to(orig_dtype) + + variance = x.pow(2).mean(dim=-1, keepdim=True) + x = x * torch.rsqrt(variance + self.variance_epsilon) + x = x.to(orig_dtype) * self.weight + if residual is None: + return x + else: + return x, residual + + +def rmsnorm_naive( + x: torch.Tensor, + weight: torch.Tensor, + residual: Optional[torch.Tensor] = None, + eps: float = 1e-6, +): + naive_norm = HuggingFaceRMSNorm(x.shape[-1], eps=eps) + naive_norm.weight = nn.Parameter(weight) + naive_norm = naive_norm.to(x.device) + + orig_shape = x.shape + x = x.view(-1, x.shape[-1]) + if residual is not None: + residual = residual.view(-1, residual.shape[-1]) + + output = naive_norm(x, residual) + + if isinstance(output, tuple): + output = (output[0].view(orig_shape), output[1].view(orig_shape)) + else: + output = output.view(orig_shape) + return output + + +def rmsnorm_flashinfer( + x: torch.Tensor, + weight: torch.Tensor, + residual: Optional[torch.Tensor] = None, + eps: float = 1e-6, +): + orig_shape = x.shape + x = x.view(-1, x.shape[-1]) + if residual is not None: + residual = residual.view(-1, residual.shape[-1]) + + if residual is not None: + fused_add_rmsnorm(x, residual, weight, eps) + output = (x, residual) + else: + output = rmsnorm(x, weight, eps) + + if isinstance(output, tuple): + output = (output[0].view(orig_shape), output[1].view(orig_shape)) + else: + output = output.view(orig_shape) + return output + + +def rmsnorm_vllm( + x: torch.Tensor, + weight: torch.Tensor, + residual: Optional[torch.Tensor] = None, + eps: float = 1e-6, +): + orig_shape = x.shape + x = x.view(-1, x.shape[-1]) + if residual is not None: + residual = residual.view(-1, residual.shape[-1]) + + if residual is not None: + vllm_ops.fused_add_rms_norm(x, residual, weight, eps) + output = (x, residual) + else: + out = torch.empty_like(x) + vllm_ops.rms_norm(out, x, weight, eps) + output = out + + if 
isinstance(output, tuple): + output = (output[0].view(orig_shape), output[1].view(orig_shape)) + else: + output = output.view(orig_shape) + return output + + +def calculate_diff(batch_size, seq_len, hidden_size, use_residual=True): + dtype = torch.bfloat16 + x = torch.randn(batch_size, + seq_len, + hidden_size, + dtype=dtype, + device="cuda") + weight = torch.ones(hidden_size, dtype=dtype, device="cuda") + residual = torch.randn_like(x) if use_residual else None + + output_naive = rmsnorm_naive( + x.clone(), weight, + residual.clone() if residual is not None else None) + output_flashinfer = rmsnorm_flashinfer( + x.clone(), weight, + residual.clone() if residual is not None else None) + output_vllm = rmsnorm_vllm( + x.clone(), weight, + residual.clone() if residual is not None else None) + + if use_residual: + output_naive = output_naive[0] + output_flashinfer = output_flashinfer[0] + output_vllm = output_vllm[0] + + print(f"Naive output={output_naive}") + print(f"FlashInfer output={output_flashinfer}") + print(f"VLLM output={output_vllm}") + + if torch.allclose(output_naive, output_flashinfer, atol=1e-2, + rtol=1e-2) and torch.allclose( + output_naive, output_vllm, atol=1e-2, rtol=1e-2): + print("✅ All implementations match") + else: + print("❌ Implementations differ") + + +batch_size_range = [2**i for i in range(0, 7, 2)] +seq_length_range = [2**i for i in range(6, 11, 1)] +head_num_range = [32, 48] +configs = list( + itertools.product(head_num_range, batch_size_range, seq_length_range)) + + +def get_benchmark(use_residual): + + @triton.testing.perf_report( + triton.testing.Benchmark( + x_names=["head_num", "batch_size", "seq_len"], + x_vals=[list(_) for _ in configs], + line_arg="provider", + line_vals=["huggingface", "flashinfer", "vllm"], + line_names=["HuggingFace", "FlashInfer", "vLLM"], + styles=[("blue", "-"), ("green", "-"), ("red", "-")], + ylabel="us", + plot_name= + f"rmsnorm-perf-{'with' if use_residual else 'without'}-residual", + args={}, + )) + def benchmark(head_num, batch_size, seq_len, provider): + dtype = torch.bfloat16 + hidden_size = head_num * 128 # assuming head_dim = 128 + + x = torch.randn(batch_size, + seq_len, + hidden_size, + dtype=dtype, + device="cuda") + weight = torch.ones(hidden_size, dtype=dtype, device="cuda") + residual = torch.randn_like(x) if use_residual else None + + quantiles = [0.5, 0.2, 0.8] + + if provider == "huggingface": + ms, min_ms, max_ms = triton.testing.do_bench( + lambda: rmsnorm_naive( + x.clone(), + weight, + residual.clone() if residual is not None else None, + ), + quantiles=quantiles, + ) + elif provider == "flashinfer": + ms, min_ms, max_ms = triton.testing.do_bench( + lambda: rmsnorm_flashinfer( + x.clone(), + weight, + residual.clone() if residual is not None else None, + ), + quantiles=quantiles, + ) + else: + ms, min_ms, max_ms = triton.testing.do_bench( + lambda: rmsnorm_vllm( + x.clone(), + weight, + residual.clone() if residual is not None else None, + ), + quantiles=quantiles, + ) + + return 1000 * ms, 1000 * max_ms, 1000 * min_ms + + return benchmark + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument( + "--batch-size", + type=int, + default=4, + help="Batch size", + ) + parser.add_argument( + "--seq-len", + type=int, + default=128, + help="Sequence length", + ) + parser.add_argument( + "--hidden-size", + type=int, + default=4096, + help="Hidden size (2nd dimension) of the sequence", + ) + parser.add_argument("--use-residual", + action="store_true", + 
help="Whether to use residual connection") + parser.add_argument( + "--save-path", + type=str, + default="./configs/rmsnorm/", + help="Path to save rmsnorm benchmark results", + ) + + args = parser.parse_args() + + # Run correctness test + calculate_diff(batch_size=args.batch_size, + seq_len=args.seq_len, + hidden_size=args.hidden_size, + use_residual=args.use_residual) + + # Get the benchmark function with proper use_residual setting + benchmark = get_benchmark(args.use_residual) + # Run performance benchmark + benchmark.run(print_data=True, save_path=args.save_path) diff --git a/benchmarks/structured_schemas/structured_schema_1.json b/benchmarks/structured_schemas/structured_schema_1.json new file mode 100644 index 0000000000000..6003698469e8d --- /dev/null +++ b/benchmarks/structured_schemas/structured_schema_1.json @@ -0,0 +1,113 @@ +{ + "$schema": + "https://json-schema.org/draft/2020-12/schema", + "title": + "User Profile", + "type": + "object", + "properties": { + "userId": { + "type": "string", + "description": "Unique identifier for the user." + }, + "personalInfo": { + "type": "object", + "properties": { + "firstName": { + "type": "string", + "description": "The user's first name." + }, + "lastName": { + "type": "string", + "description": "The user's last name." + }, + "age": { + "type": "integer", + "minimum": 0, + "description": "The user's age." + }, + "phoneNumbers": { + "type": + "array", + "items": { + "type": "object", + "properties": { + "type": { + "type": "string", + "enum": ["home", "work", "mobile"], + "description": "Type of phone number." + }, + "number": { + "type": "string", + "pattern": "^\\+?[1-9]\\d{1,14}$", + "description": "Phone number in E.164 format." + } + }, + "required": ["type", "number"] + }, + "description": + "List of phone numbers associated with the user." + } + }, + "required": ["firstName", "lastName"] + }, + "address": { + "type": "object", + "properties": { + "street": { + "type": "string", + "description": "Street address." + }, + "city": { + "type": "string", + "description": "City name." + }, + "state": { + "type": "string", + "description": "State or province." + }, + "postalCode": { + "type": "string", + "pattern": "^\\d{5}(-\\d{4})?$", + "description": "Postal code." + }, + "country": { + "type": "string", + "description": "Country name." + } + }, + "required": ["street", "city", "state", "postalCode", "country"] + }, + "preferences": { + "type": "object", + "properties": { + "newsletterSubscribed": { + "type": + "boolean", + "description": + "Indicates if the user is subscribed to the newsletter." + }, + "favoriteCategories": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of user's favorite categories." + } + }, + "required": ["newsletterSubscribed"] + }, + "accountStatus": { + "type": "string", + "enum": ["active", "inactive", "suspended"], + "description": "Current status of the user's account." + }, + "registrationDate": { + "type": "string", + "format": "date-time", + "description": "ISO 8601 formatted date-time of user registration." 
+ } + }, + "required": + ["userId", "personalInfo", "address", "accountStatus", "registrationDate"] +} \ No newline at end of file diff --git a/csrc/attention/paged_attention_v1.cu b/csrc/attention/paged_attention_v1.cu index 741cd0c82dc89..cb1a069942069 100644 --- a/csrc/attention/paged_attention_v1.cu +++ b/csrc/attention/paged_attention_v1.cu @@ -140,13 +140,10 @@ void paged_attention_v1_launcher( blocksparse_block_size, blocksparse_head_sliding_step); #define CALL_V1_LAUNCHER_SPARSITY(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE) \ - switch (is_block_sparse) { \ - case true: \ - CALL_V1_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE, true); \ - break; \ - case false: \ - CALL_V1_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE, false); \ - break; \ + if (is_block_sparse) { \ + CALL_V1_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE, true); \ + } else { \ + CALL_V1_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE, false); \ } // NOTE(woosuk): To reduce the compilation time, we omitted block sizes diff --git a/csrc/attention/paged_attention_v2.cu b/csrc/attention/paged_attention_v2.cu index 6de8d0bdd5b8d..c457bdb89008e 100644 --- a/csrc/attention/paged_attention_v2.cu +++ b/csrc/attention/paged_attention_v2.cu @@ -147,13 +147,10 @@ void paged_attention_v2_launcher( blocksparse_head_sliding_step); #define CALL_V2_LAUNCHER_SPARSITY(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE) \ - switch (is_block_sparse) { \ - case true: \ - CALL_V2_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE, true); \ - break; \ - case false: \ - CALL_V2_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE, false); \ - break; \ + if (is_block_sparse) { \ + CALL_V2_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE, true); \ + } else { \ + CALL_V2_LAUNCHER(T, CACHE_T, BLOCK_SIZE, IS_FP8_KV_CACHE, false); \ } // NOTE(woosuk): To reduce the compilation time, we omitted block sizes diff --git a/csrc/cache_kernels.cu b/csrc/cache_kernels.cu index 1be806bbfa43c..8a95279f9a25a 100644 --- a/csrc/cache_kernels.cu +++ b/csrc/cache_kernels.cu @@ -307,10 +307,20 @@ void reshape_and_cache_flash( torch::Tensor& key_cache, // [num_blocks, block_size, num_heads, head_size] torch::Tensor& value_cache, // [num_blocks, block_size, num_heads, head_size] - torch::Tensor& slot_mapping, // [num_tokens] + torch::Tensor& slot_mapping, // [num_tokens] or [num_actual_tokens] const std::string& kv_cache_dtype, const double k_scale, const double v_scale) { - int num_tokens = key.size(0); + // NOTE(woosuk): In vLLM V1, key.size(0) can be different from + // slot_mapping.size(0) because of padding for CUDA graphs. + // In vLLM V0, key.size(0) is always equal to slot_mapping.size(0) because + // both include padding. + // In vLLM V1, however, key.size(0) can be larger than slot_mapping.size(0) + // since key includes padding for CUDA graphs, while slot_mapping does not. + // In this case, slot_mapping.size(0) represents the actual number of tokens + // before padding. + // For compatibility with both cases, we use slot_mapping.size(0) as the + // number of tokens. + int num_tokens = slot_mapping.size(0); int num_heads = key.size(1); int head_size = key.size(2); int block_size = key_cache.size(1); diff --git a/csrc/dispatch_utils.h b/csrc/dispatch_utils.h index a634e1c3d4886..03414b7e1ae93 100644 --- a/csrc/dispatch_utils.h +++ b/csrc/dispatch_utils.h @@ -14,6 +14,20 @@ #define VLLM_DISPATCH_FLOATING_TYPES(TYPE, NAME, ...) 
\
  AT_DISPATCH_SWITCH(TYPE, NAME, VLLM_DISPATCH_CASE_FLOATING_TYPES(__VA_ARGS__))
 
+// TODO(luka/varun): use FP8_TYPE macro after refactoring
+#ifndef USE_ROCM
+  #define VLLM_DISPATCH_CASE_QUANT_TYPES(...)                      \
+    AT_DISPATCH_CASE(at::ScalarType::Float8_e4m3fn, __VA_ARGS__)   \
+    AT_DISPATCH_CASE(at::ScalarType::Char, __VA_ARGS__)
+#else
+  #define VLLM_DISPATCH_CASE_QUANT_TYPES(...)                      \
+    AT_DISPATCH_CASE(at::ScalarType::Float8_e4m3fnuz, __VA_ARGS__) \
+    AT_DISPATCH_CASE(at::ScalarType::Char, __VA_ARGS__)
+#endif
+
+#define VLLM_DISPATCH_QUANT_TYPES(TYPE, NAME, ...) \
+  AT_DISPATCH_SWITCH(TYPE, NAME, VLLM_DISPATCH_CASE_QUANT_TYPES(__VA_ARGS__))
+
 #define VLLM_DISPATCH_CASE_FLOATING_AND_BYTE_TYPES(...)   \
   AT_DISPATCH_CASE(at::ScalarType::Float, __VA_ARGS__)    \
   AT_DISPATCH_CASE(at::ScalarType::Half, __VA_ARGS__)     \
diff --git a/csrc/mamba/causal_conv1d/causal_conv1d.cu b/csrc/mamba/causal_conv1d/causal_conv1d.cu
index 498d069c05f0d..dd1e6de2e0180 100644
--- a/csrc/mamba/causal_conv1d/causal_conv1d.cu
+++ b/csrc/mamba/causal_conv1d/causal_conv1d.cu
@@ -424,7 +424,7 @@ void causal_conv1d_fwd_kernel(ConvParamsBase params) {
   // and the one before it (chunk = n_chunks - 1 and chunk = n_chunks - 2),
   // (which occurs when `final_state_position` is a non-positive index)
   // we load the correct data from smem_exchange from both chunks, the last chunk iteration and the one before it
-  if (final_state_position < 0 && seqlen > kWidth){
+  if (conv_states != nullptr && final_state_position < 0 && seqlen > kWidth){
     input_t vals_load[kNElts] = {0};
     if ((chunk == n_chunks - 2) && (tidx == kNThreads - 1)){
       // chunk = n_chunks - 2, a segment of the final state sits in the last index
diff --git a/csrc/ops.h b/csrc/ops.h
index ea001190bc202..816b471d062d2 100644
--- a/csrc/ops.h
+++ b/csrc/ops.h
@@ -66,6 +66,14 @@ void fused_add_rms_norm_static_fp8_quant(torch::Tensor& out,
                                          torch::Tensor& weight,
                                          torch::Tensor& scale, double epsilon);
 
+void rms_norm_dynamic_per_token_quant(torch::Tensor& out,
+                                      torch::Tensor const& input,
+                                      torch::Tensor const& weight,
+                                      torch::Tensor& scales,
+                                      double const epsilon,
+                                      std::optional<at::Tensor> scale_ub,
+                                      std::optional<at::Tensor> residual);
+
 void rotary_embedding(torch::Tensor& positions, torch::Tensor& query,
                       torch::Tensor& key, int64_t head_size,
                       torch::Tensor& cos_sin_cache, bool is_neox);
diff --git a/csrc/quantization/fp8/common.cuh b/csrc/quantization/fp8/common.cuh
index d7c0297d5333f..15bd5b6ed1564 100644
--- a/csrc/quantization/fp8/common.cuh
+++ b/csrc/quantization/fp8/common.cuh
@@ -1,6 +1,9 @@
 #pragma once
 
+#include "quantization/vectorization.cuh"
+
 #include <cmath>
+#include <c10/core/ScalarType.h>
 
 #ifndef USE_ROCM
   #include <c10/util/Float8_e4m3fn.h>
@@ -15,6 +18,7 @@ using FP8_TYPE = c10::Float8_e4m3fnuz;
 // Using the default max value from pytorch (240.0) will cause accuracy
 // issue when running dynamic quantization. Here use 224.0f for rocm.
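 // (note) FP8_E4M3_MAX is the saturation bound applied by float_to_fp8() in
 // quant_conversions.cuh before casting to FP8_TYPE.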
constexpr auto FP8_E4M3_MAX = 224.0f;
 #endif
+constexpr static auto kFp8Type = c10::CppTypeToScalarType<FP8_TYPE>::value;
 
 namespace vllm {
 
@@ -89,22 +93,6 @@ __global__ void segmented_max_reduction(float* __restrict__ scale,
   }
 }
 
-template <typename scalar_t>
-struct __align__(8) vec4_t {
-  scalar_t x;
-  scalar_t y;
-  scalar_t z;
-  scalar_t w;
-};
-
-typedef struct __align__(4) {
-  FP8_TYPE x;
-  FP8_TYPE y;
-  FP8_TYPE z;
-  FP8_TYPE w;
-}
-float8x4_t;
-
 template <typename scalar_t>
 __device__ float thread_max_vec(scalar_t const* __restrict__ input,
                                 int64_t const num_elems, int const tid,
@@ -139,10 +127,10 @@ __device__ void scaled_fp8_conversion_vec(FP8_TYPE* __restrict__ out,
                                           float const scale,
                                           int64_t const num_elems,
                                           int const tid, int const step) {
+  using float8x4_t = q8x4_t<FP8_TYPE>;
   // Vectorized input/output to better utilize memory bandwidth.
-  vec4_t<scalar_t> const* vectorized_in =
-      reinterpret_cast<vec4_t<scalar_t> const*>(input);
-  float8x4_t* vectorized_out = reinterpret_cast<float8x4_t*>(out);
+  auto const* vectorized_in = reinterpret_cast<vec4_t<scalar_t> const*>(input);
+  auto* vectorized_out = reinterpret_cast<float8x4_t*>(out);
 
   int64_t const num_vec_elems = num_elems >> 2;
 
diff --git a/csrc/quantization/fused_kernels/fused_layernorm_dynamic_per_token_quant.cu b/csrc/quantization/fused_kernels/fused_layernorm_dynamic_per_token_quant.cu
new file mode 100644
index 0000000000000..3c4f183bf4b59
--- /dev/null
+++ b/csrc/quantization/fused_kernels/fused_layernorm_dynamic_per_token_quant.cu
@@ -0,0 +1,160 @@
+
+#include <ATen/cuda/CUDAContext.h>
+#include <torch/all.h>
+
+#include "../../dispatch_utils.h"
+#include "layernorm_utils.cuh"
+#include "quant_conversions.cuh"
+
+namespace vllm {
+
+template <typename scalar_t, typename scalar_out_t, bool has_residual = false>
+__device__ void rms_norm_dynamic_per_token_quant_vec(
+    scalar_out_t* __restrict__ out,       // [..., hidden_size]
+    float* __restrict__ scales,           // [num_tokens]
+    scalar_t const* __restrict__ input,   // [..., hidden_size]
+    scalar_t const* __restrict__ weight,  // [hidden_size]
+    float const* scale_ub, float const var_epsilon,
+    float const min_scaling_factor, int32_t const hidden_size,
+    scalar_t* __restrict__ residual = nullptr) {
+  float rms = 0.0f;
+  float token_scale = 0.0f;
+
+  // Compute rms
+  vllm::vectorized::compute_rms<scalar_t, has_residual>(
+      &rms, input, hidden_size, var_epsilon, residual);
+
+  // Compute scale
+  vllm::vectorized::compute_dynamic_per_token_scales<scalar_t, scalar_out_t,
+                                                     has_residual>(
+      &token_scale, scales, input, weight, rms, scale_ub, min_scaling_factor,
+      hidden_size, residual);
+
+  // RMS Norm + Quant
+  if constexpr (std::is_same_v<scalar_out_t, int8_t>) {
+    vllm::vectorized::norm_and_quant<scalar_t, scalar_out_t, true,
+                                     has_residual>(
+        out, input, weight, rms, 1.0f / token_scale, hidden_size, residual);
+  } else {
+    // FP8 - Do not invert token_scale for exact match with FBGemm
+    vllm::vectorized::norm_and_quant<scalar_t, scalar_out_t, false,
+                                     has_residual>(
+        out, input, weight, rms, token_scale, hidden_size, residual);
+  }
+}
+
+// RMS norm + quant kernel
+template <typename scalar_t, typename scalar_out_t, bool has_residual = false>
+__global__ void rms_norm_dynamic_per_token_quant_kernel(
+    scalar_out_t* __restrict__ out,       // [..., hidden_size]
+    float* __restrict__ scales,           // [num_tokens]
+    scalar_t const* __restrict__ input,   // [..., hidden_size]
+    scalar_t const* __restrict__ weight,  // [hidden_size]
+    float const* scale_ub, float const var_epsilon,
+    float const min_scaling_factor, int32_t const hidden_size,
+    scalar_t* __restrict__ residual = nullptr) {
+  // For vectorization, token_input and token_output pointers need to be
+  // aligned at 8-byte and 4-byte addresses respectively.
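+  // (note) the alignment guarantees come from the container types in
+  // vectorization.cuh: vec4_t is declared __align__(8) and q8x4_t
+  // __align__(4); together with the hidden_size % 4 == 0 check below, every
+  // token row can be reinterpreted as an array of packed 4-element vectors.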
+  bool const can_vectorize = hidden_size % 4 == 0;
+
+  if (can_vectorize) {
+    return rms_norm_dynamic_per_token_quant_vec<scalar_t, scalar_out_t,
+                                                has_residual>(
+        out, scales, input, weight, scale_ub, var_epsilon, min_scaling_factor,
+        hidden_size, residual);
+  }
+
+  float rms = 0.0f;
+  float token_scale = 0.0f;
+
+  // Compute RMS
+  vllm::compute_rms<scalar_t, has_residual>(&rms, input, hidden_size,
+                                            var_epsilon, residual);
+  // Compute Scale
+  vllm::compute_dynamic_per_token_scales<scalar_t, scalar_out_t, has_residual>(
+      &token_scale, scales, input, weight, rms, scale_ub, min_scaling_factor,
+      hidden_size, residual);
+
+  // RMS Norm + Quant
+  if constexpr (std::is_same_v<scalar_out_t, int8_t>) {
+    vllm::norm_and_quant<scalar_t, scalar_out_t, true, has_residual>(
+        out, input, weight, rms, 1.0f / token_scale, hidden_size, residual);
+  } else {
+    // FP8 - Do not invert s_token_scale for exact match with FBGemm
+    vllm::norm_and_quant<scalar_t, scalar_out_t, false, has_residual>(
+        out, input, weight, rms, token_scale, hidden_size, residual);
+  }
+}
+}  // namespace vllm
+
+// Residual add + RMS norm + dynamic per token
+template <typename scalar_in_t>
+void rms_norm_dynamic_per_token_quant_dispatch(
+    torch::Tensor& out,           // [..., hidden_size]
+    torch::Tensor const& input,   // [..., hidden_size]
+    torch::Tensor const& weight,  // [hidden_size]
+    torch::Tensor& scales,        // [num_tokens]
+    double const var_epsilon,     // Variance epsilon used in norm calculation
+    std::optional<at::Tensor> const& scale_ub,
+    std::optional<at::Tensor>& residual) {
+  int32_t hidden_size = input.size(-1);
+  int32_t num_tokens = input.numel() / hidden_size;
+
+  dim3 grid(num_tokens);
+  dim3 block(std::min(hidden_size, 1024));
+  const at::cuda::OptionalCUDAGuard device_guard(device_of(input));
+  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  const float min_scaling_factor =
+      out.dtype() == torch::kInt8
+          ? std::numeric_limits<float>::epsilon()
+          : 1.0f / (std::numeric_limits<c10::Float8_e4m3fn>::max() * 512.f);
+
+  if (residual.has_value()) {
+    VLLM_DISPATCH_QUANT_TYPES(
+        out.scalar_type(), "rms_norm_dynamic_per_token_quant_kernel", [&] {
+          vllm::rms_norm_dynamic_per_token_quant_kernel<scalar_in_t, scalar_t,
+                                                        true>
+              <<<grid, block, 0, stream>>>(
+                  out.data_ptr<scalar_t>(), scales.data_ptr<float>(),
+                  input.data_ptr<scalar_in_t>(),
+                  weight.data_ptr<scalar_in_t>(),
+                  scale_ub.has_value() ? scale_ub->data_ptr<float>() : nullptr,
+                  var_epsilon, min_scaling_factor, hidden_size,
+                  residual->data_ptr<scalar_in_t>());
+        });
+
+  } else {
+    VLLM_DISPATCH_QUANT_TYPES(
+        out.scalar_type(), "rms_norm_dynamic_per_token_quant_kernel", [&] {
+          vllm::rms_norm_dynamic_per_token_quant_kernel<scalar_in_t, scalar_t,
+                                                        false>
+              <<<grid, block, 0, stream>>>(
+                  out.data_ptr<scalar_t>(), scales.data_ptr<float>(),
+                  input.data_ptr<scalar_in_t>(),
+                  weight.data_ptr<scalar_in_t>(),
+                  scale_ub.has_value() ?
scale_ub->data_ptr<float>() : nullptr,
+                  var_epsilon, min_scaling_factor, hidden_size, nullptr);
+        });
+  }
+}
+
+void rms_norm_dynamic_per_token_quant(
+    torch::Tensor& out,           // [..., hidden_size]
+    torch::Tensor const& input,   // [..., hidden_size]
+    torch::Tensor const& weight,  // [hidden_size]
+    torch::Tensor& scales,        // [num_tokens]
+    double const var_epsilon,     // Variance epsilon used in norm calculation
+    std::optional<at::Tensor> scale_ub, std::optional<at::Tensor> residual) {
+  TORCH_CHECK(out.dtype() == kFp8Type || out.dtype() == torch::kInt8);
+  TORCH_CHECK(out.is_contiguous() && input.is_contiguous());
+
+  if (scale_ub.has_value()) {
+    TORCH_CHECK(out.dtype() == kFp8Type);
+  }
+  TORCH_CHECK(scales.dtype() == torch::kFloat32);
+
+  VLLM_DISPATCH_FLOATING_TYPES(
+      input.scalar_type(), "rms_norm_dynamic_per_token_quant_dispatch", [&] {
+        rms_norm_dynamic_per_token_quant_dispatch<scalar_t>(
+            out, input, weight, scales, var_epsilon, scale_ub, residual);
+      });
+}
diff --git a/csrc/quantization/fused_kernels/layernorm_utils.cuh b/csrc/quantization/fused_kernels/layernorm_utils.cuh
new file mode 100644
index 0000000000000..cec6b54edb569
--- /dev/null
+++ b/csrc/quantization/fused_kernels/layernorm_utils.cuh
@@ -0,0 +1,327 @@
+#pragma once
+
+/**
+ * __device__ layernorm utilities.
+ */
+
+#include "quantization/vectorization.cuh"
+#include "quant_conversions.cuh"
+
+#ifndef USE_ROCM
+  #include <cub/cub.cuh>
+#else
+  #include <hipcub/hipcub.hpp>
+#endif
+
+namespace vllm {
+
+// has_residual must be true if residual is not a nullptr
+template <typename scalar_t, bool has_residual = false>
+__device__ void compute_rms(float* rms, scalar_t const* __restrict__ input,
+                            int32_t const hidden_size, float const epsilon,
+                            scalar_t const* __restrict__ residual = nullptr) {
+  int64_t const token_offset = blockIdx.x * static_cast<int64_t>(hidden_size);
+  // sum of squares
+  float ss = 0.0f;
+
+  for (int32_t i = threadIdx.x; i < hidden_size; i += blockDim.x) {
+    float x = static_cast<float>(input[token_offset + i]);
+    if constexpr (has_residual) {
+      x += static_cast<float>(residual[token_offset + i]);
+    }
+
+    ss += x * x;
+  }
+
+  using BlockReduce = cub::BlockReduce<float, 1024>;
+  __shared__ typename BlockReduce::TempStorage reduceStore;
+  ss = BlockReduce(reduceStore).Reduce(ss, cub::Sum{}, blockDim.x);
+
+  __shared__ float s_rms;
+  if (threadIdx.x == 0) {
+    s_rms = rsqrtf(ss / hidden_size + epsilon);
+  }
+  __syncthreads();
+
+  *rms = s_rms;
+}
+
+template <typename scalar_t, typename scalar_out_t, bool has_residual = false>
+__device__ void compute_dynamic_per_token_scales(
+    float* __restrict__ token_scale, float* __restrict__ all_token_scales,
+    scalar_t const* __restrict__ input, scalar_t const* __restrict__ weight,
+    float const rms, float const* __restrict__ scale_ub,
+    float const min_scaling_factor, int32_t const hidden_size,
+    scalar_t const* __restrict__ residual = nullptr) {
+  int64_t const token_offset = blockIdx.x * static_cast<int64_t>(hidden_size);
+
+  constexpr scalar_out_t qmax{std::numeric_limits<scalar_out_t>::max()};
+
+  float block_absmax_val_maybe = 0.0f;
+  for (int32_t i = threadIdx.x; i < hidden_size; i += blockDim.x) {
+    float x = static_cast<float>(input[token_offset + i]);
+    if constexpr (has_residual) {
+      x += static_cast<float>(residual[token_offset + i]);
+    }
+
+    x = static_cast<float>(static_cast<scalar_t>(x * rms) * weight[i]);
+    block_absmax_val_maybe = fmaxf(block_absmax_val_maybe, fabsf(x));
+  }
+
+  using BlockReduce = cub::BlockReduce<float, 1024>;
+  __shared__ typename BlockReduce::TempStorage reduceStore;
+  block_absmax_val_maybe =
+      BlockReduce(reduceStore)
+          .Reduce(block_absmax_val_maybe, cub::Max{}, blockDim.x);
+
+  __shared__ float s_token_scale;
+  if (threadIdx.x == 0) {
+    float scale = 0.0f;
+    if (scale_ub) {
+      scale =
min(block_absmax_val_maybe, *scale_ub);
+    } else {
+      scale = block_absmax_val_maybe;
+    }
+    // token scale computation
+    scale = max(scale / qmax, min_scaling_factor);
+    s_token_scale = scale;                 // Shared memory store
+    all_token_scales[blockIdx.x] = scale;  // Global output store
+  }
+  __syncthreads();
+
+  *token_scale = s_token_scale;
+}
+
+template <typename scalar_t, typename scalar_out_t, bool is_scale_inverted,
+          bool has_residual = false>
+__device__ void norm_and_quant(scalar_out_t* __restrict__ output,
+                               scalar_t const* __restrict__ input,
+                               scalar_t const* __restrict__ weight,
+                               float const rms, float const scale,
+                               int32_t const hidden_size,
+                               scalar_t* __restrict__ residual = nullptr) {
+  int64_t const token_offset = blockIdx.x * static_cast<int64_t>(hidden_size);
+
+  for (int32_t i = threadIdx.x; i < hidden_size; i += blockDim.x) {
+    float x = static_cast<float>(input[token_offset + i]);
+    if constexpr (has_residual) {
+      x += static_cast<float>(residual[token_offset + i]);
+      residual[token_offset + i] = static_cast<scalar_t>(x);
+    }
+    // Norm
+    x = static_cast<float>(static_cast<scalar_t>(x * rms) * weight[i]);
+    // Quant
+    output[token_offset + i] =
+        ScaledQuant<scalar_out_t, is_scale_inverted>::quant_fn(x, scale);
+  }
+}
+
+namespace vectorized {
+
+// Compute 1.0/rms(input)
+// hidden_size must be a multiple of 4
+template <typename scalar_t, bool has_residual = false>
+__device__ void compute_rms(float* rms, scalar_t const* __restrict__ input,
+                            int32_t const hidden_size, float const epsilon,
+                            scalar_t const* __restrict__ residual = nullptr) {
+  int64_t const token_offset = blockIdx.x * static_cast<int64_t>(hidden_size);
+
+  // Vectorized input/output to better utilize memory bandwidth.
+  vec4_t<scalar_t> const* vec_input =
+      reinterpret_cast<vec4_t<scalar_t> const*>(&input[token_offset]);
+  vec4_t<scalar_t> const* vec_residual = nullptr;
+  if constexpr (has_residual) {
+    vec_residual =
+        reinterpret_cast<vec4_t<scalar_t> const*>(&residual[token_offset]);
+  }
+
+  // sum of squares
+  float ss = 0.0f;
+
+  int32_t const num_vec_elems = hidden_size >> 2;
+
+#pragma unroll 4
+  for (int32_t i = threadIdx.x; i < num_vec_elems; i += blockDim.x) {
+    vec4_t<scalar_t> in = vec_input[i];
+
+    vec4_t<float> x;
+    x.x = static_cast<float>(in.x);
+    x.y = static_cast<float>(in.y);
+    x.z = static_cast<float>(in.z);
+    x.w = static_cast<float>(in.w);
+    if constexpr (has_residual) {
+      vec4_t<scalar_t> r = vec_residual[i];
+      x.x += static_cast<float>(r.x);
+      x.y += static_cast<float>(r.y);
+      x.z += static_cast<float>(r.z);
+      x.w += static_cast<float>(r.w);
+    }
+
+    ss += x.x * x.x;
+    ss += x.y * x.y;
+    ss += x.z * x.z;
+    ss += x.w * x.w;
+  }
+
+  using BlockReduce = cub::BlockReduce<float, 1024>;
+  __shared__ typename BlockReduce::TempStorage reduceStore;
+  ss = BlockReduce(reduceStore).Reduce(ss, cub::Sum{}, blockDim.x);
+
+  __shared__ float s_rms;
+  if (threadIdx.x == 0) {
+    s_rms = rsqrtf(ss / hidden_size + epsilon);
+  }
+  __syncthreads();
+
+  *rms = s_rms;
+}
+
+// Vectorized version of vllm::compute_dynamic_per_token_scales
+// hidden_size must be a multiple of 4
+template <typename scalar_t, typename scalar_out_t, bool has_residual = false>
+__device__ void compute_dynamic_per_token_scales(
+    float* __restrict__ token_scale, float* __restrict__ all_token_scales,
+    scalar_t const* __restrict__ input, scalar_t const* __restrict__ weight,
+    float const rms, float const* __restrict__ scale_ub,
+    float const min_scaling_factor, int32_t const hidden_size,
+    scalar_t const* __restrict__ residual = nullptr) {
+  int64_t const token_offset = blockIdx.x * static_cast<int64_t>(hidden_size);
+
+  // Vectorized input/weight/residual to better utilize memory bandwidth.
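+  // (note) each block handles one token row: it reduces the absolute max of
+  // the normalized, weighted values, optionally clamps it to *scale_ub, and
+  // derives the scale as max(absmax / qmax, min_scaling_factor).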
+  vec4_t<scalar_t> const* vec_input =
+      reinterpret_cast<vec4_t<scalar_t> const*>(&input[token_offset]);
+  vec4_t<scalar_t> const* vec_weight =
+      reinterpret_cast<vec4_t<scalar_t> const*>(weight);
+  vec4_t<scalar_t> const* vec_residual = nullptr;
+  if constexpr (has_residual) {
+    vec_residual =
+        reinterpret_cast<vec4_t<scalar_t> const*>(&residual[token_offset]);
+  }
+
+  constexpr scalar_out_t qmax{std::numeric_limits<scalar_out_t>::max()};
+
+  int32_t const num_vec_elems = hidden_size >> 2;
+  float block_absmax_val_maybe = 0.0f;
+
+#pragma unroll 4
+  for (int32_t i = threadIdx.x; i < num_vec_elems; i += blockDim.x) {
+    vec4_t<scalar_t> in = vec_input[i];
+    vec4_t<scalar_t> const w = vec_weight[i];
+
+    vec4_t<float> x;
+    x.x = static_cast<float>(in.x);
+    x.y = static_cast<float>(in.y);
+    x.z = static_cast<float>(in.z);
+    x.w = static_cast<float>(in.w);
+    if constexpr (has_residual) {
+      vec4_t<scalar_t> r = vec_residual[i];
+      x.x += static_cast<float>(r.x);
+      x.y += static_cast<float>(r.y);
+      x.z += static_cast<float>(r.z);
+      x.w += static_cast<float>(r.w);
+    }
+
+    block_absmax_val_maybe = fmaxf(
+        block_absmax_val_maybe, fabs(static_cast<scalar_t>(x.x * rms) * w.x));
+    block_absmax_val_maybe = fmaxf(
+        block_absmax_val_maybe, fabs(static_cast<scalar_t>(x.y * rms) * w.y));
+    block_absmax_val_maybe = fmaxf(
+        block_absmax_val_maybe, fabs(static_cast<scalar_t>(x.z * rms) * w.z));
+    block_absmax_val_maybe = fmaxf(
+        block_absmax_val_maybe, fabs(static_cast<scalar_t>(x.w * rms) * w.w));
+  }
+
+  using BlockReduce = cub::BlockReduce<float, 1024>;
+  __shared__ typename BlockReduce::TempStorage reduceStore;
+  block_absmax_val_maybe =
+      BlockReduce(reduceStore)
+          .Reduce(block_absmax_val_maybe, cub::Max{}, blockDim.x);
+
+  __shared__ float s_token_scale;
+  if (threadIdx.x == 0) {
+    float scale = 0.0f;
+    if (scale_ub) {
+      scale = min(block_absmax_val_maybe, *scale_ub);
+    } else {
+      scale = block_absmax_val_maybe;
+    }
+    // token scale computation
+    scale = max(scale / qmax, min_scaling_factor);
+    s_token_scale = scale;                 // shared memory store
+    all_token_scales[blockIdx.x] = scale;  // global output store
+  }
+  __syncthreads();
+
+  *token_scale = s_token_scale;
+}
+
+// hidden_size must be a multiple of 4
+template <typename scalar_t, typename scalar_out_t, bool is_scale_inverted,
+          bool has_residual = false>
+__device__ void norm_and_quant(scalar_out_t* __restrict__ output,
+                               scalar_t const* __restrict__ input,
+                               scalar_t const* __restrict__ weight,
+                               float const rms, float const scale,
+                               int32_t const hidden_size,
+                               scalar_t* __restrict__ residual = nullptr) {
+  int64_t const token_offset = blockIdx.x * static_cast<int64_t>(hidden_size);
+
+  // Vectorized input/output/weight/residual to better utilize memory bandwidth.
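+  // (note) is_scale_inverted selects multiply vs divide inside
+  // ScaledQuant::quant_fn: the int8 caller passes 1.0f / token_scale with
+  // is_scale_inverted = true, while the fp8 caller divides by token_scale to
+  // match FBGemm (see fused_layernorm_dynamic_per_token_quant.cu).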
+  vec4_t<scalar_t> const* vec_input =
+      reinterpret_cast<vec4_t<scalar_t> const*>(&input[token_offset]);
+  vec4_t<scalar_t> const* vec_weight =
+      reinterpret_cast<vec4_t<scalar_t> const*>(weight);
+  q8x4_t<scalar_out_t>* vec_output =
+      reinterpret_cast<q8x4_t<scalar_out_t>*>(&output[token_offset]);
+  vec4_t<scalar_t>* vec_residual = nullptr;
+  if constexpr (has_residual) {
+    vec_residual =
+        reinterpret_cast<vec4_t<scalar_t>*>(&residual[token_offset]);
+  }
+
+  int32_t const num_vec_elems = hidden_size >> 2;
+
+// TODO(luka/varun) extract into type-agnostic vectorized quant function to
+// replace scaled_fp8_conversion_vec
+#pragma unroll 4
+  for (int32_t i = threadIdx.x; i < num_vec_elems; i += blockDim.x) {
+    vec4_t<scalar_t> const in = vec_input[i];
+    vec4_t<scalar_t> const w = vec_weight[i];
+
+    vec4_t<float> x;
+    x.x = static_cast<float>(in.x);
+    x.y = static_cast<float>(in.y);
+    x.z = static_cast<float>(in.z);
+    x.w = static_cast<float>(in.w);
+    if constexpr (has_residual) {
+      vec4_t<scalar_t> r = vec_residual[i];
+      x.x += static_cast<float>(r.x);
+      x.y += static_cast<float>(r.y);
+      x.z += static_cast<float>(r.z);
+      x.w += static_cast<float>(r.w);
+      // Update residual
+      r.x = static_cast<scalar_t>(x.x);
+      r.y = static_cast<scalar_t>(x.y);
+      r.z = static_cast<scalar_t>(x.z);
+      r.w = static_cast<scalar_t>(x.w);
+      vec_residual[i] = r;
+    }
+
+    q8x4_t<scalar_out_t> out;
+    out.x = ScaledQuant<scalar_out_t, is_scale_inverted>::quant_fn(
+        static_cast<scalar_t>(x.x * rms) * w.x, scale);
+    out.y = ScaledQuant<scalar_out_t, is_scale_inverted>::quant_fn(
+        static_cast<scalar_t>(x.y * rms) * w.y, scale);
+    out.z = ScaledQuant<scalar_out_t, is_scale_inverted>::quant_fn(
+        static_cast<scalar_t>(x.z * rms) * w.z, scale);
+    out.w = ScaledQuant<scalar_out_t, is_scale_inverted>::quant_fn(
+        static_cast<scalar_t>(x.w * rms) * w.w, scale);
+    vec_output[i] = out;
+  }
+}
+
+}  // namespace vectorized
+
+}  // namespace vllm
diff --git a/csrc/quantization/fused_kernels/quant_conversions.cuh b/csrc/quantization/fused_kernels/quant_conversions.cuh
new file mode 100644
index 0000000000000..f8a9872226a3a
--- /dev/null
+++ b/csrc/quantization/fused_kernels/quant_conversions.cuh
@@ -0,0 +1,81 @@
+#pragma once
+
+/**
+ * __device__ helper functions to deal with float -> quant datatype conversion
+ */
+
+#include "quantization/vectorization.cuh"
+// TODO(luka/varun): refactor common.cuh to use this file instead
+#include "quantization/fp8/common.cuh"
+
+namespace vllm {
+
+// TODO(luka/varun): combine into common utilities for int8
+// (with int8_quant_kernels.cu)
+static __device__ __forceinline__ int8_t float_to_int8_rn(float const x) {
+#ifdef USE_ROCM
+  static const float i8_min =
+      static_cast<float>(std::numeric_limits<int8_t>::min());
+  static const float i8_max =
+      static_cast<float>(std::numeric_limits<int8_t>::max());
+  // round
+  float dst = std::nearbyint(x);
+  // saturate
+  dst = std::clamp(dst, i8_min, i8_max);
+  return static_cast<int8_t>(dst);
+#else
+  // CUDA path
+  uint32_t dst;
+  asm volatile("cvt.rni.sat.s8.f32 %0, %1;" : "=r"(dst) : "f"(x));
+  return reinterpret_cast<const int8_t&>(dst);
+#endif
+}
+
+static __device__ __forceinline__ FP8_TYPE float_to_fp8(float const x) {
+  float const r = fmax(-FP8_E4M3_MAX, fmin(x, FP8_E4M3_MAX));
+  return static_cast<FP8_TYPE>(r);
+}
+
+template <typename quant_type_t, bool is_scale_inverted,
+          typename enable = void>
+struct ScaledQuant;
+
+template <typename quant_type_t, bool is_scale_inverted>
+struct ScaledQuant<
+    quant_type_t, is_scale_inverted,
+    typename std::enable_if_t<std::is_same_v<quant_type_t, int8_t>>> {
+  static __device__ __forceinline__ quant_type_t quant_fn(float const x,
+                                                          float const scale) {
+    if constexpr (is_scale_inverted) {
+      return float_to_int8_rn(x * scale);
+    } else {
+      return float_to_int8_rn(x / scale);
+    }
+  }
+};
+
+template <typename quant_type_t, bool is_scale_inverted>
+struct ScaledQuant<
+    quant_type_t, is_scale_inverted,
+    typename std::enable_if_t<std::is_same_v<quant_type_t, FP8_TYPE>>> {
+  static __device__ __forceinline__ quant_type_t quant_fn(float const x,
+                                                          float const scale) {
+    if constexpr (is_scale_inverted) {
+      return float_to_fp8(x * scale);
+    } else {
+      return float_to_fp8(x / scale);
+    }
+  }
+};
+
+template <typename scalar_t, typename quant_type_t, bool is_scale_inverted>
+__device__ void
scaled_quant_conversion(quant_type_t* __restrict__ output,
+                        scalar_t const* __restrict__ input,
+                        float const scale, int const tid,
+                        int const num_elements, int const step) {
+  for (int i = tid; i < num_elements; i += step) {
+    output[i] = ScaledQuant<quant_type_t, is_scale_inverted>::quant_fn(
+        input[i], scale);
+  }
+}
+
+}  // namespace vllm
diff --git a/csrc/quantization/vectorization.cuh b/csrc/quantization/vectorization.cuh
new file mode 100644
index 0000000000000..44c999130f756
--- /dev/null
+++ b/csrc/quantization/vectorization.cuh
@@ -0,0 +1,33 @@
+#pragma once
+/**
+ * __device__ datatypes vectorized by 4
+ */
+
+// Include both AMD and NVIDIA fp8 types to avoid circular import
+// TODO(luka/varun) use FP8_TYPE instead after refactoring
+#include <c10/util/Float8_e4m3fn.h>
+#include <c10/util/Float8_e4m3fnuz.h>
+
+namespace vllm {
+
+// Vectorization containers
+template <typename scalar_t>
+struct __align__(8) vec4_t {
+  scalar_t x;
+  scalar_t y;
+  scalar_t z;
+  scalar_t w;
+};
+
+template <typename quant_type_t>
+struct __align__(4) q8x4_t {
+  static_assert(std::is_same_v<quant_type_t, int8_t> ||
+                std::is_same_v<quant_type_t, c10::Float8_e4m3fn> ||
+                std::is_same_v<quant_type_t, c10::Float8_e4m3fnuz>);
+  quant_type_t x;
+  quant_type_t y;
+  quant_type_t z;
+  quant_type_t w;
+};
+
+}  // namespace vllm
diff --git a/csrc/torch_bindings.cpp b/csrc/torch_bindings.cpp
index 4e64b9c92773a..1ffab14862fed 100644
--- a/csrc/torch_bindings.cpp
+++ b/csrc/torch_bindings.cpp
@@ -128,6 +128,14 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) {
   ops.impl("fused_add_rms_norm_static_fp8_quant", torch::kCUDA,
            &fused_add_rms_norm_static_fp8_quant);
 
+  // Fused Layernorm + Quant kernels
+  ops.def(
+      "rms_norm_dynamic_per_token_quant(Tensor! result, Tensor input, "
+      "Tensor weight, Tensor! scale, float epsilon, "
+      "Tensor? scale_ub, Tensor!? residual) -> ()");
+  ops.impl("rms_norm_dynamic_per_token_quant", torch::kCUDA,
+           &rms_norm_dynamic_per_token_quant);
+
   // Rotary embedding
   // Apply GPT-NeoX or GPT-J style rotary embedding to query and key.
  ops.def(
diff --git a/docs/requirements-docs.txt b/docs/requirements-docs.txt
index e3e35844405ac..ca2da4cd66d2d 100644
--- a/docs/requirements-docs.txt
+++ b/docs/requirements-docs.txt
@@ -12,8 +12,10 @@ pydantic >= 2.8
 torch
 py-cpuinfo
 transformers
-mistral_common >= 1.3.4
+mistral_common >= 1.5.0
 aiohttp
 starlette
 openai # Required by docs/source/serving/openai_compatible_server.md's vllm.entrypoints.openai.cli_args
-partial-json-parser # Required by docs/source/serving/openai_compatible_server.md's vllm.entrypoints.openai.cli_args
\ No newline at end of file
+fastapi # Required by docs/source/serving/openai_compatible_server.md's vllm.entrypoints.openai.cli_args
+partial-json-parser # Required by docs/source/serving/openai_compatible_server.md's vllm.entrypoints.openai.cli_args
+requests
diff --git a/docs/source/assets/usage/disagg_prefill/abstraction.jpg b/docs/source/assets/usage/disagg_prefill/abstraction.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..1a99e3ed8cf5f3b6679196752896fca94a22a4a4
GIT binary patch
literal 104673
[base85-encoded binary image data (104673 bytes) omitted]
z31wyA#GyrP9C}#pl%be*cv37@8zV)1Sg8}!Y)7U@S`YvoV%mk+y6s)#$wQw*1vK)7 zlRw5!QE?qfF<5;f;7(}2De^JA6v~SFuAJD=74)UDO9OQn{q9ZCQWis>Jwc=f5&+Yq zo1RX6Z+P%yDMqyL`y)pym5;?PogU9mRdMonP4X(2A4$q8I{ghbR{=)!pe9|}yo(GS z#Pl>>hT-346?C=Sl=PeA&A(O%8^H1wFN+cByK8)@CKEJh?j4JC@UQiRq zjzWAbLfF1^RIMpR>Xqpn5djkOiSN4iGOR7LJetjVVo&V&Fz}|0r{mfgTD{i#QxYg& ze_bkwh{*fVqE7{CP}^Y6jw7lqY4i9UVPVr@A&XC+UL5K=HA6GS!>mVF0b}u=g?)8Ky{VL^_6#`bx~uDm$J|wXVF-{u;jPkoi`Ebh9`J zP`6`yW;GN#an!pQv*8wbsg?mC*grE6V-JVR$(rs?33E>#R5+BCl#%>Q9C>ICF*Bw7 zd3oY1@K{PfBuJeAw^D_CnzH%Y?VASAjNgboQ|+*%Lc87nQb*I&qwOcl81;6RrM|L{5=nJg!gv%?8&dpu^y4tz`YJD z&v}CEM>1vk0jExwu^pPkP?y<{r^kOag7K1n_v3T*uHT>|IBM`RGna7&(1bv+M`T6W8dNiFjqgK%Aw~5@F>}v@1g;YQFZ803zx}a;EqwMm`Ym)C;BrEFe&N@8)mz}HzcJc}4Dv(beCMU6!D)5UBbCG-p zopM~s$VA+Bj6$g;X;~a@G9Fr{(AVq6L#aj@e&_lc<=O>aZF9T1=QViX?E7-IxZRwC}AYo|M)KXh zF*X}sc;B^wu{~Ly_SsxY$yNAWw^jxS$N+d26^eb216qQ2UMo>RUjpw*rM3YijJz}~ zGODV!5aDZlE#pZJe@tq>pN_^>VUnGl?WM=&teb=A!A8yavt4h$bcF5Nw1&<~9mF^=_KhFrz5o}{&<-`=QHi-} zX4y%95xFa#$8{&CN<_+u;)f2g7q>l9rlr1xs5g*H+4JULzR5^47frbxbPhjA6oJ2< zqt4?HQ4hqb2mBR3BtN38Kwi1~HmrD*FXA3lkADjjJP}dq-s+x5WCWqV-Yoxh>joM3JL+W|qa)vQ(pRx#m}2 zPRmxSJ3U$9J0uh^)IgX9Fzzu13frNZ6oZkO&+_%@%+aJ>&(cKmz-N2BZ&AK;-c<~l zp5LlF+ovUAtfQVwpSnB4+y2}FrpXJB4XakO{?3$pH$p%|p_O@>;19|@9Pj5$4` zSZ_PA%$FXVZ2kH{Ot;<*885~q@EF(_@&IWgv6|({jlT9v$}o|42CT<`9JUb}1I^_VYj7O|@vxKR=%yE8 zPcgcgm+rVe9|#IBNRT-%8d=z5CNWY^&B2e4-A3Vim7h?p;!ByqbNvck8^Lcn3tFn( z{UgOBlXv>RwCgt z;oj?{Zd5FG=ZaOV;}l(0&vdAnT&$2nX)ifl+UkD(OTOu`z`>rlkL)fS)iwquGK~8e zV(d{IP)3fz?c{p#rgHE%ZzrYjmA|a7!kdC#)16mEbRwHh`q~e#?W3Z&6-vWf2Hp42YF$th==Y zmS_Mv4_OO3t(;6?NydpV1^NkChvd+*k7-SN*7|wydimIxs=^u0g%$#?FY8^b+YbGz z)7$-e%C~G-8e>DSV<7sM)1zCmUSK)d1GhlIA%FBITj%?@?y1m_wW~*eO{!)N@{4;` zYoy*hoTNfBpcG_}&m-eS&qcmg6## zYAU4NGB0uBYhd}YzmV$37+tDWC(vN;3p4z{?!CSq)VjB}!gJb- z+ee7!=TV@10v{WvUuEU!t1>uYA2Rx0vr@q~>E#%Vg=^vBxUS0s?GA@eoq=w({wn)+ z^@wnBkVYx=Gd-W%X(y}5Jm)|n9}X!0V$?lNX*3~D;ihs={xnE;`Bm#wb^|h z_nXmn?l-U|TbeFR&(uFz#`iF)>A4{8NJ9Y=S)J5Tc9L}I&h|#y_O;VlJ&kXuze0w* zUZfdak?W`zUN118e)6%sj!teXoc0D~mm+?xC-Z z{91VIzHQL`ami&$ic-oRqjQY3+nXYmcQ2#dh?B$)LM1xh@F?8Z=6LB1*(In$a!O_G zR(8J5^~{^vR4A`|VfUn1j*rxZ0413!@#D-TR{Tu^Wx5LZ15azAJ7_NLsW7_>V_Ml# zOJ$!~Bex=dr=klg-XizbEK2OSo3#IcV&cdqge-XrIc7%ztaL6@RtjP6nY2W9B;)w^ zP)X@Je0tT#nwt;5UdR+~TAQ_aCz7xC_<8coh5Ftjcau^Hm#tv6q$LnzadE&i{tUe^ zrBgJu=;re;jRFkr-73qs@(rx3eClScRZ|`Ar(owlTWoTIBIJ^WSk0iCzQu~Mnt<2& zLXZuMpg)d62!ESWipQAm91d1Znf}yH8ql8lFq7*xsd##KV&)$8-lCA@tk5-c68a8m zn?6680H0+F&&?oA#s%{xAkT@#G5Q+XEqTZ!raw`fSr|4L@8RWs{=psd{6_#nwt+r? zBFEZc=;u~KJ~WL0Q!^N<*luKvZ5%`Chof3N5YXXtw&(L?3s*$b!!CWh<1dyI*;%o= zk2_E?qQ_Z+Pf7=p0jdP3Dc4S7CaOps!)qc>4I+40^APmG<5af2EnL4`T$N<0x-n{F0VNLG9=Z>k*S5IK{3@@?%-8 zjw6}Z67m*bi`Q0vaxCb7Trcr`us>j%kL-=<$9itp<(_XnhTKdsKv19!UO^sgvd|`DRQsM?DQXF>^gO<=BHKhgu)s^A7T6{EtBw^{(9eQc^kgrzw)sI7? 
zpeaTT%l%C~EQXAV`_))j(-PAz;kEZo z*=V(Iiex6)p7ZtXjwcdsGG;3Ivf)VJX^ySYx5M=`opu}_m~xCnw!VaM=c|W&-5M)~ z)5S9B7w-$)58ikA&J(|b`FB3br@^4=x$QQq*brc|Qjm|;Lx>OXYHjVLtr8rkhCUda z9ojdN?OvPwC4J_M(UY?8{Q`Zv4>sKMZ{5@K1?;L@SlO_cc^rj7TctCpU++z~6Zx7g zwDU7%B}1Y#b?i1WXWCQ;`sEpBSKXhSm{thCaA-S+-+`^ySyx&3-=VBD!dw|Z??5R+ zFt=IaHM+~ff*6h6n-q3;;HkqQ%|@qyNyR9S!zEk}TiHuTxOq?kK2)H5+r&-)oa7?A zI@ZigeK(#PuvSD&pDZJK+mm}=%{RzL5ki!8st$mGKxS9|9qp3)9(x2@xlRyeqoZCodM~rKI5O($!fv@PH+hTo`13v#y=dJBc3;$(;y(0MxSIreSJ7fPO*OZlP$;H8lts?3xT}aZIs?^l(*&#@RT=yM50Y|^hzvV z4tegof45uFZ;P$TCB9_9y?U6|11FG^+JVw=#%U@#0b^XSc&n>nT63Deso{BJ^X20m z!g}0~KHs^wop1y-7Y!nmw67F^BX9cv2a&TwzFYKrw2uvi`Pig*h?uz#ftazHqvb z(%Vh#HlGmCT73~{`OwccgNQ!=Fc5nDi=wg%^+g=2E@GwPg`h=&iyK?agRs(c3>95AFD0V$_?O(u&Nq z3e>v~<>;n)94X!VEM?ZE@`tiC%M=9HUD)>oDzGE~=8uB}OZTh>`v@NW@%r3qbJDMV zW!7H104`Km?BJCyBChzx4{oM-J)P?f_*@jb_dG_Ax_3m^G^zO*x0TPWE1i)M3>9xwjbQ;*v^|Mow@7yk?bu(|ob9DKZdaWW_Rd;m zuR}%Ep)uvIF+~mzvG6U-_v|TTomDK#56GIUEfJ|6t!wiB4+x%=2oq|~a~lULzTj-d zcM;7}ezgtf8|fl(+-VBhZhW$TIP9<-RMVIisW8MT*q^k2N=~_bOk&FSv0vw;Dn-FQ zk1I*>-Z_n1-SX*=VQ zCyp~!k4+9v9zOV$Q$@GJ*su|7rsu4jmP6Pnkd+}sFbb?EH+z_Nc%OP{gIU}*osf^F z0o&}X&wNN7P&r3g>6!~=wikxshY*QqZYD3lkoVBkS`l$|1YZD+#sB(|cxYqy?ODrS zvAse<9~eAhwNZvRCf>9J|=|8`FX@r5wJ9$V9+EvzmUYbOP=9opebWW&!`{04BJcoA1 zojGrrIb* zWv=X)XrR$vo8<)GT=&)bB%$ISHKgZx>0*8Ds>7_mp_NvGI?u^v^iobdX36;_+nz8t zkARgBlx#*QJV3H7V;l(6VT+VNf|K4KzpJwHTwOoE>6?BMt#)XaYJ&;!#*V`uK$Zsv zv@9O@ zoNl3830Gg{Mz<;>d;BS1!7T!tDJla43L8v{8Em(GKY2!e^IMyAO}tO$G3Q9c!@(Xyo)8BU-4 z>MXS-GuA8>y?hPcj|VN8u8c$Jjjdtn$GaJExh2n>ivX8O47m43RM!B!_dL6eSpB4v zc&}z=4^qf9?3;ARAf`5PZ>oBiyyxg$Andd-Q$PF}2`k{2!WkDqRln7M=?~6-9UMO? zOvqmd-m}+7P|)`KjNc(utz&+N%DWgKRngZGDlaEph~d-W0;<6A2ZFq=Ot6uL0_+|% z^#(Ung#Q9fr*(OPVLZfE2$SrPy!Mm-rSY52`YE?bzE26^3sL0R`Cj|6i@wy$k&9dX zk@+a9@c^vIP2p$gU;=156e1%6?dT1OGOf1}+-jZfLevx!yF|iu09AX+bHgqLrV;+dUe_y} z`Y}VMRoZp-IX|yb-^Y^jJ##&$j+ail5C#pz8fz-ntv)Y-wNu_?5=cO;Z9~{eoN$Yr zlQ*>eZ1G0y$WOfHdhV*~FIVwWoa>RS>_K9_6Dlap9ntn++!EYw8L-?Q*y$D!K}MyA zIkEZB;OzSb6~=0n8&%zDCNc5_{spYu2xrB%lL3mtTD8Bc6we<_As-)dRZU|Tz07;! 
zT(klJcJ8g%R&+grDQ4G-+X8Ivlfn!&g#IH;l{=JE%@MfM%C# zw$T^VICb5RJQE)nCQ@B+6B+buRuy3mH@g*9U}(c0fFLRQtwm|*w@jZ}-IT++?bwtr zr8#NA#Zths&fL{mStRI@Ixq4gmsrqs!}BYlbRSlxp*GCP=9zY99DwUjF_e;W;mB9t zd<$5GGc+i`U)NOfS&F7?^~;%s>r$WZptnPRq3;TP~ds zStojFZt+8Uca$Y+a@>z+jW4IszxixjEH~|D^9^DZVT2PLOjhqcJHB-2=JfNd&~P2= z8mq+KzU-xhX#qPxMG#fC^gehCoZlCznkw$cfEN$)q1q{wTKG6?YIJK*t9}{GTZlMlz4|oJ&l7^xq-&W`EfQ9 zYBgXV2t(P!!DZg6ul5$j=6Ze=4Zo_oa>}>hLXdP<>Bs(?0TDU@S5x1K zo7OwYYDUc!BSn4@Yy+p6>^bh*oM_rc0DpdG& z22k?QTsf|qxYP*nVJEs6D_64&-NxB1eceu%`#~_z5DGCn+w@rbYUHHd`AQ4*EiR)@ zo^Brf=}DtN*7bjP2paw?CtDCau8+OI6Em?`3@sWIK&HQ$RqQ@A)yyAOQCo7L4zdxO zd2-EojJ_7>otpcI<1kxzI^zAO_bjtD#dZccfHBmZ@A*UpjapdRB7V4x?|z*c)KuN{8N7A*c8`+9OVB7)o$cG3fo=69@ z)C~X3K5aI3Cca}|fBY*(-L$lFPb(KkxLwEEu;wzX%Sn0*V=MatAnF(ib4JRgxvY}- zn6-Vb$9S5+N1456JRd zeQ-le{Gs|Q>Eh?B&@_P5A({*sK~eH&++?>wI|w8j%oT0)F+K7eCF5I27m#d>>fw**Ptg$$cYO%<127zH}!jx>-BZ7%>Jv7J7vVbPlvpkghTLn ztTgz`Y6LIT140e%KFSLSff74q)MAI3`j1>-r?x4HoOE5ef*-pO`mMw7v(t^u6+Vp3 z##@BD_TrxdKrkPw(0KrqyNSXWsd46Ryn`o!S^@}pWS`h&g;0ib2t_8`?EKHl$p^`I z7R?Zy{u9JboGj!4x}TyGX(fPl@WRmxtH#RJH~0*T{O2QD_MWCnL zs3%;Ucb}JFB=E=F$m4WC#KCTX;ft~{ep%gFxP2}X%NhgwYu zSJ3=`K3aY01vUQrf`MqKgpKh_*rQ{s!MOC^6T z^V>gH-w21HIUI4N+`S$Y^qhCr6&1QvC=n|2JsZE3^;(2OCX`$=0Q_h>L$NAJ8e&(;%+xv z`Kx}(tvacbCH}ehB6cOfMW z55TWMwkN^{P?Qo#P(vWe``yKN_TKl5-{&4qy|*=M5I?x+?~Fhj{XVQ+Xp*rc-L!9ByD*A4A$DffLw=h zvR7#eKwIK_7joUIc=T6~wOE7*zWaN$kr1GbR3J`&MUjNt;nj$)m=@86x-TNb+G-SuXzb488Iv@CU zr*Z9$?p=#`blhVUr|Rzbw-^L5sYzCq96D0WrlWKje>CZ#hoLJt~dE_Th3LtDaY*N>Df#3octL6lmH*8 z$et!_om1Ra&k8E`sT=&EUuW^Wt9Z@VKfPaRho+H(9HvrsqF#2Q3nK>JM1mXk7Y}6y zS$UgS-P7rRcEW^lh?{Y_=9AXmpZ;3Ev%SY}mhR6qjMP^4@^LV%TlfTvFxusfr{93b zwj)n+&aQC~l^P;~z5DQGpwbh17GQG*)=bkt?mjWA7)mAC$*>Kvpd7#s zY*5LauoxUa(OpagE!T|{#<);5$P(U-Nio`mk$!?G8#nv!_E|@B3d|`#Pv_a~NQp0F zT@W3fR^yZ{GwPvUlVT_;fAlR!CQ7I-Vnea~ix& zl%4V!oXg!$&#hAlKg&=K3=q_;I*l7Gk+hkV6x%0zWG(!e5rQ>N!LjzM(Qx1z18^CP z4ST;M=5P`PA74fYa=F*+lD_~pHdp)is!WEuuc(eC%RJhTQfZkF=n5s!g4i+;Xk~uf z^s@{kc<3=F#79oZOJ5_!xKj*)@SqU=3Z%OGXQem3co=+8CJ^9UV0XGit z0(4tKG+9zNdzlp43T*~Bj8K)=_s)~vLHlhjrpy0kklt6TjKH%N!KK`uUYL{zUj7aMV{TkWxhOp zd^OxiI1otM0{4f>`H=;R=>SB$OeIH@i=(!q%{pzL3$wj@qA4q38RuK& zy?zced4X@YiXDRkhCTQ%t2g(w*wRoJyw+uv)<-3<_8HJxF(>j_1Uvc)+R2nY5b3g{ zwRrvF8PD`t%4f`Y_Pezk&ew0HyuOT(W_7Xm;Mz{XJ;Abth{pvajU7KpUrqdlireK< zfhYX93oQSbPaZ#mEL z)P`S8NA*gb1P(c#dZBwZz&+wg%Aj&nnxJyprl!U#I9-5%(&juhTigi@_|FdF`Xv-{ z=-ZzU7k&O*9#NJtI;4E?bi~)>w>Bo$DHeu20+&d60X-i9Pt^1uR2lXItUOpzOOd=l zubNnykpxL&`?EY>mZJSAQJn}O$NKnAA8t&E3xb)aY?~4=3AqlW)zOl%fe&>up!tQ| z(oth`Er28j5)P0PxY8%w+L`A$0Nw7|0BHKfi7)?nxT>7~bc}c(dJJN8UkzLTAJA3u z5WZ(XqQH1Q)2AUgUt%tjb<7O9Fg~-)76N6)H^M4l;<`nQPAJ|Gg!uNF zCj7spZVa5#U zA%y10JFaieg)z!W-7Sg@pEx54HQ%#?uV$W%|JI89^5Xa@TljN5Qs#q=6}$`(nt8%x ziHkL?0#GEosIl%&n7svwcmamUgDeoK_9*Uxts~q34*Q&uoGc*qYSC?e3nGdmtDgE<%C|6su1bQX~~v|*~5No;A9a#%yau~362i3 zA7jEZ0yKTBJQfh8G@FI?cZ_7-Q1$*{H_&CUUEXqd6jpV&X_^65JNi@I2c?`97SAFW(_ zeKgSvk%9Fa0vP0IGTmkXNW!}^h>LyHfwAQ4G>ltO@3jnnhKzB8bcT|+lk*)@-7i;i zlpY`W)q8G>Yrzsg)0i#Fl;GMa8&I3a(!kB?YEd00FHc$#uPh{Pq4er!4Hr+NvW3?miBfd_f$u~RR!pIog> z*uBG26MOB}_9{U|gw7)?@fLYX#(FUW^^kLzLJZr5e^8?#t5xGCBI+j`s(k8ekH1ty z!d}7QtIvc4&H)M+;EOUcu+%DwY=v=JrQ2}d&UQHx>M!gGuT*6 zik{Yuk#~P;DtfRr3KB^NlhJVw&eZXPt?PJ$2Js+%lWs(C;~jgTct`m3IBLzL0)*P# zr9ni#K)P9&Y0piBPgfG@=Kw;U)yG6}6LoPsXd)%}O@&4IkH-3#fC@Jk-KovdJ<2*( z%(A^^JA;tA3K7Xbfu45`R^`Ph!?Q1f@s$B~Rddn_NEz$^QW1Au4s6~HXh;TPdbt(B za#2(r)RRMso?_x9#?e?S3G)ZDiQ2mXt&;h&^wzIN7B;6FJQT0K;2z^f;-+(P0F|}4 z3>HRpRs`Dt&@f4256AUDVs-VOf}Ra_5FN!QGIxi4KIvV)Cw7Na3$^E(yV?;+>@ctb zhyfCDdYKkphHEncCh^Jo^VpUH4676eG_kEkX-YA(+)U{C_--^ay28Fgb}imLFyyD{ 
zcMBe)U{=9~aH9;(gY6H{xQfkIY;8h&Fbhd_S;t4aGwRYx&XT0_sx-!S9Wi{l)3E%j zx`W{4yS$J}BQo0#fIRAYT2T`}!Bh*&!?vY6YA2$aPd#hPV`aBxbv`G}FOeqC9Cp9` zy)PzqqCMDC?@Ok}zUy|w#ABYNKJtF#joZ9a?vK($V|h#<~YzDc$DTr2p^ zR2sS*eePh{fs+38Z?Y9b=iR@z90M>7j}|e`tx-cH8}7hTn-S>nzv}(G>Z6|8G12Nm z{tHQ)?ReJ2I|FBTdLA-&h~64ovQ?k}Y6p71c&p4is_vVceHFjI1DQjnTO!xah0uU+?WO=SHh!a@aIZ2JQEYoB?0Cxw z@Wye_B%Ge)V|19|8YLD{gBN3YI(XH+8p_L5b~mg_%Jn*U=G19Bxum}2U6|n+0??Kg zI`KEHjs;c;8biJ+j^Z4h`HM#g<5pL_t1h{GUI3BazO+5BaME7DCdkXyrRRE=A@Q*e z4;1fRp-p+Snpg{uy1S)_ak_iLlA+g~0w3M_~r{<*FAR(cWdzVBPd$OVima z8#^&q!kQtdgHl`Orsu%_;yJ5X$^`Qf3=s7t)+(q!0mWbI*IyI5L8-~VqFmV%K!#lT z6BZ5C?5dt6Bs}~A-DC+G%L3!xK)0M!nQ`!W_16ECHuGq$hWXNi2|yFsUu~|u zs5F(UK|wr#(a(XBz>rItw zPk;BE6XIHNpWr-^q>`EG(fF1fAopWH#Gi6lv-q|^xFPDfj zhfn8{0m5<<)RrsF;9po+Z82np&x8)YU=??*mqfhzRB?1S|K#kE%hiYSl0VE|wP)NH z>q}zk#~!JGwsC?OVsVc8jQ;PPv@?EBG?bs%@A3Y*+vY$^=p>Ey^igT*6ZrJ&LoG4v zyHF=qg@uz>>&DS@vb~w^H1V+)*$C ziHyznv%APuc#fn7m9RL<#kJj6>nd+nN+)Cz8~iw&?wZBAsSA`9WeR)q%BrCr4s$OpPk1%A|f zTB@IJ>)_l2W;KLg=Le(nw)F*6T|ZbMDRKW@5FZ!~VJ;@_&v9*_SN&ZMw~A|Xl&U~q zd?`FGefVXy`H*;t%z^3?AJr5r1C*cQ=gty=v73I1qa(z?tF##!iTk>W4AzzsWmu(M zGRvjt?9TNtTH|LF5`KK&9ihEnX_u68-oLl6mO$n`-VM*fZ^gYE%~b_2%MXK|(n{A| z3A0h{u$9{HF~r3Oz|AT??#o@^77~55!|+wwZC0Vohul291)L}v(UCqI+SfZUo!lKh zo8llRDDN$Ne0#}Z=S!^2=HL?_Vp&leBniG?63{Q;#&4N&OKQXf<{{xBL*o-4hp+#B z7B(AduB=n^K}qGbWzFL@CB86ILH6Ck{~6@w`8t1YKkwQL2n0X2gL+*@W{lr3@4 z5JCTjD!)7qU;3?J1gR#_)Xb(}X>T58?pS`8P*%9l_Tk2+?;_B61gY>iW+Ld078mFf zzFH+-QEssO$}p-1C*zD9FS8{jCKs2U*)`E0T6W@-B|!Jh8w7ahgu7}nR+&P;83U2G z64#^j(Kg(~#Q`%q>Y?GjgPFe(`fu$scS;*HY#+DMtv3*nb>HQ-*Ys*(tL*5ifQj9a z2E<$VL?|Q(th4YXAlwxK#^WkCXGy85;p4BI17u^rkqL!P(wR!DI?uKa=R4PhZ%CVM z!(K4KfZXI};NO82`WZ$+&&bnt`%$I8_-y1p`X0!%=qyt7Dz?^s^$46(Di0pe<@~*;TNuA{Fr*2$d z+}kLVLPET#GzgJN-Mx}UiCP+TOEFJVSW|w1m}~hT_yMp2ynxY7mnSq%a_dGRp$)V4 zp(Sy(OFLAw^iIB|;f<-b7p8F%eZB#Kn@4``R`pg%wwLxe`Gcag-oq3GQ19u17;+83 zhX=BKd=wSwUL7v`hvr2P2+ul)ApRUFo4nW|rwo}LmO{sBx@Fr9AKaAH!kK)s?k|dZ zhf{J@2QP6EJCX}_tKkoDlqy3P`jUv{^eRVLdcBxg)CJ`-lKr0N8>@|r^pI|51U?`9 z`c+CKp?MTMDE~3=V<%`F0-@Hhcp!=+1or5gyFXs}+-yEp8 z?$-A*>BNSoN*S6il|Q8k^WCvWBi`XO;q)77i&AV5+io)fb-xlwAcZY&5me%B>)78D zeY>I7MSrJmX~OvvdMQ&yS+hr;dw6@CrXPNO>1dlX8$iw6wQDDgFEg?9 zdM2Q$=NOcGhulqwnkb`To?QP`+UJwq54M|5*Q@d=@~KbnpH#-3`DNV?#Gjtvft(|Q z5e4Av;^7_mYD7{E?iHHU(H4?*ZXHG1o#FAd@?9w`M?OKY*JCZ5rdYey2Y{4|*fxAE zYEfVs9w$w#?THv|7a>f|g-9R&Ev(U%0{orLWsXC4(l()xEp`)mggM&=7>uQ1!b+V! zfuf8?cPJc3zG@f_2=kx%I%PU&WT0^%xU&0l(gicgTelUGevWga4K3L?&Oz222Duyd zO1mB8{IP?s;v<+#S_w;UDt!D)%x-+_*QNJLWYSbsFF&$S4N(m?*!&VT0`lr0u_`>l zcIWKo7B`Eq0a4;fET27k7fNbsF3TfksNnUGRYL8zK8LOw>8Z6Jv!)z^c_pKDy}cI0^3$WZ$_Zyf5MJyx>wdEBA_udK-~GbN`F z+wSwd+TMh% z-|sWU*afK%*XBWZc+PrH6d5{Xop9Dds;4R-zOBBUVN*&jQ96<8dy72`xr7% zR{|0QS(FJqz$w>PgD&9ccHCug+#K7QFeXMyf~+#A+T6HZL@34LZ{+uN1P5#{rwQyY z%AmHCS>SYGGG-CGYHcF*>Ds@;%%nKOxVB-~1dQGe6T?vy|KdpkS9c&G##@%xmltUW zuPfZF{ zPpcnnRoc;*&*{^Qe`23!16AX^a3nqoiiV)ub%39=xfrt%9Y(5k2GY0E)X7S^H%;g7 z*u|dlo(esb?Aq|sr!ly?7*R0KWK21_n11Zv{1%WL1=c1%j+asj+u9~?M+N@p08^D) z4ED<~aYzLk2pJmp1xy%vCmd&-=q{ZhTb#H@CJ$~QO`E@KJ9GB_qM7aEorjvntY5)2 zu&s)^AOH5QFThCyOgN*4=uW?Drrp&qA<=ttuDEyC8~T}^_iqW}{>ME#Pk1^XV;6Je zmZu4l^TaBA9Q?Xi^eZ!vaXQZGxXid*mU8F47G`*xb%xS=Qi|rpjwxm-Uz@VsQ(PNx zl&3R+vus9lXK1wJ72sEUHI*29=p&WW1o561lgc|Lz3b;OH7;-OT+-{l^Hwt=u;%-! 
z6VL|I-o^pM6%_3UxgwLd2?9Tuj(P=0c)w_<+e&%$$c-JCJ<+SnJ74oZk=+S#fDs7$ zgnOro^G33F)S|fJe!rN+gT(1t*h&nZBm;H&0~35EEAzT;5K!0_TQfl}0!QT9LI-}` zSPGk9_g$MK&IeiBv5(=#j{zc(M_tGQoA!WzA7TIVHx^u}%^&wo99g5rjX8#g;s_gX zJK%V|QwP+yY}H&~_J=W2(?q%xa_vk8O%P-O&KCm~`GqgU=_L3{ENmPkK8hpZzsUSN z*rb0e;pY8oh-us(EVKe>7}&S1xjO(zW=Bt71KC@_mcbO$UY4j^p>LPk|M3XZEf?6>Y+v2&?^ zr01srSQL(fUReX^?=alA4Z6e5!9N@u{ncw?J}`@d6oGHW{}V*UfT!QL0Zff4_`@;6 zTuLktt_A;X$Qgpk+pB22f$5=X_@TZ z=wb?)R?g;goXg!ltmW|;PT8H^O}bapxUKGOC(`)%^RXDcb}xSXR4$lnA_u@G7Vb@E>iXf=bu`wsSWbW_oZO}zb=ROB`LI!s5Y?0c*DT8TY;(o~1gv*meu> zljo{Nb*1!IxXe?T!nQRR>UG+)4eRb4MQT(m{a*L_rxFk^2NeSCG6IR8ED ze5KWnwSu6pu6b7K2}6&I zV!w1N?q+A5`P&4q;OO?+>t<@PaC$`?2;5x8b$kU*$1CKoM1&-c;vxft(@WQ{Q0kBTg;%$F)&R|w{+~{OQ|Lsx9ctYS6$2tG8dgG$}|LLj!Z}k*1nMij=vebzj$-~SjFu{V*r4%n3KU-k| zztvDX^j?ugAT^Y{T{Tanx+6rcJ5uV^eZ1cNc-TSrqu+~gXg$sfn5skYmB;}YZp`i^ z@Y^3sUGQBvJ!f%?aC;V`1BTBA;_l5j+7D3${7i6zT&lW%-kCIv%XZO{`&;c8bj zfrUhYQ>DNO4_OnCups*&<~E~vF|-2h)eVUabIhc}r!z4pQ=Nf|VW~xmSL|2&XAhkU zWOjF{PQRuSTeLU*#q&0~05_UIgyI~b8vqu9TmjJ+@BmkEz&_12&NeAEo-nIiL4kUp zKr9GK*JUhDP~i{YwSZ>xQl~+#SaZE!Ooay{e!Io<-c6NGV`~0t+bwze1dWy^OCu99 z5&7U%ONjMRw3i@hInc73<-+*cvARxtV95Jz9F6KR7>Z1Hz9MqK_R8~lOLVl`G3~cc zEDkyvLcnshfq0@j0)!%kv={{)$SRNLO{i(uIM!0VR$Jb;(SyuE)UKu5!Nzy=)UWFJ zpYaNQf6eIpO0ESrn573Q$54yE>Gj4eH9Eq$mQIeq82F~&4aoM)-V-Lt={qd$FclMW z(;?=nRnUv3$#?6p0f0dRF1V^2BM$0A#KM2&wiBNp`!h$>a8!Gk6X5x`HQ!j1ZM9=` zaV&Gg+C;8UHLY2(!ze@dcT8~f#Viyz*chI+5-SY z{+w|HeuGn0?puJmsXF~15tX7BzK+4kp6ik2hQW1=ncFF^&Nx5amU{na>&x;A|7ES2 z=IUgRNE^|_hli?Oy#?eHO3&3d-PO>x82ROYck+vO$c-=U<*%B>HDhIY0@IYeHU~OB z=KYDFMX+_ymzaIrY|JJ0Vf4&}rlA^wRL3?M8QbGVgS$F(Z;9kyt-AK4^&3L!2YeG& z!kfqgZABYQ+eI#UHV-#T<;{^q%DX<0kJ;azxLt3S>TUC-4}ctabMFiNc2wV}vi$>z zNiK}F-;Acl4uF8qLU#go{u(@U(j(h=0PR z(~u&MDU-`PcQqLIBlKUg4D1O=r~~*`IV{jq{01y*FX+h^QC8{nEE^SW=;hw1(SK+~ zRebN^o;uTdA1MnVW7Ad(McG>&@R;l#_n*UOK@X+k+CqM59hflw2;2gEpu@OB=Od?y zHY!_|8mr!V`HacQl9FdUJUqM)?Ebl<<6^-TZk=KW&XKFqBPh!(^_01rmH|VA9Fm^#q<$3Af=^e$ z>Gqwq3ECh+38|)BxoT1qI=Uzem6toTVwlRZ8uGvFuEM(^dkf{HuFw!#(uh?ZJt9@aa-=-2m+Y z+Zo%Ft2797?SbwVJ{qI*41M*B)z#sy@)_AH4RqRj;gzYjeM41SkBGo5%zEQ#J*zO} zhaQfk3?xWkgMdsMKqC219j1iPWGYB3ccu~DYg}(nq4#KfBO&&;RHDJPCL!KP-L%4Z z1+u13<3Z&|h9?z)=epS(lxX?kC#s>XG=_F3tTsJ0C@_$60%kKEO!q*U0os@v}CM@h^c%8N;ug0(VBPHd}zqD9x03x#bW`pbahr0)7CyhS6vF z7difLg?((6ducznelOlqq_(z3|K@(PUFt$5B|JProN|y}`WK1{Qsh^J`)XsXdPTu* zU~M-n?tl9E@56O4`oDYdlcNueQ{OzUMg-)F!{foV1Hf?FT>7P?1MKjhLbYQ&`oVLH2S7Vt$h z)NNp7`QyRcM{(>~N>pumzuc`XVY(=!u**feLng*Ov}~cWIY!CZdG{T|^1*XOH#hAy z-Er}f(no<`<0d!_P}Hk&&?-#<7J_b_^m*n1y+TN_N3cIx9HS%jl7{A zQPK)@%WH+bXB&;!ez~X-19R^ji`j0TZ@Qm%HG?Bn(Ea5wD-ak0GK)BakIH(p6ka}; zwK>&ce%I|~zaZ4^+r0Vddn*zW=TBACLxP!9vJ`q15?rSHb>|-awDh~&{W3i<x1-2Tl%?MKFTwSYt{m;4>9rN*RIz9N4+$#}o*zXzk9KsnIK9J%A}73kn4Kj8^fgri?apudud zrvJB&cU6xrbNBIbr3R5(z@Rx$`^Kvi52z}_9KT0b%o5);&;uR;Z6x!dKBap(M?J4v zz#TcCz|)AL>$|Y33^L8ei)wX#`9jn=RvymZGvlyfDN3mKp#97?^D{g=Qa85jd%zVP zM7~Q}oS06E#nX|9=$0eg0_;&V=p0ej4vN)Hq$sGiQ{4G!*IQpJl;T_fs_ET5P{9LG~aI^Zmgfd_i1iQ`O&HTv>Lu#qP5lRszQw7*7YrH}qG z#Wa6-vRp6kQNQQtLoWC2cwNFh^zRWsUp9cg6pO&uEpKQ*ZYgE#Lc|)0(LdyPQHh%u zWL96|Ui1`yIl^|SSrvRe@O9uu^v+4CiAQdt2npgl~NYfG@Q9yUBl0J~&orySUO_8nySJPJGh59Mg-NixCDg zq9PyRUdxfGu9M!m@`?7eqEDVOGKlYIe;&8kZL_WGYd~=9c0g`X`M*Sw5r_a386dOr zPhiX+$Z7taq~<@qk?#qdT>$l`jvXW}dXK_?T?I;(|2$}0gWaoE$<+aQ){+0nvux+UTq3FmoVzK{+PeKD#cX6 z-ov#47yH{TB*26$T7vZVubh{>Yf|iOfY3~x*nHT zIdaryc*$1NeDEMier>Ssf+-Y}M(^RR+swHO+UY?r*dbADEy6C&e4zY<2Dy&0$V!-P2?G2gs z>t5E)O`mM1YWk-3595;k3N8GV;{XUepd!TmMw~a0VK*0>tNPEQ+>%JYyZRY{D6f}O z9sXJq*B_`PTNegw*)DFNc(V1k6lWd_RNSh72n28^T*HB%>8J+-;f^&!^4KNl-5WfV 
z;q$n43B1hAUpxt44zc{$Tjt23MhXl51xSuDBOsMBF{f3uvXpv0=fDB^7Z+jfZ%RyA z`*#ZOIijpMY7h9it|3^;^@~Rt1v3MSmOi3!)JxOj)A!8S7el*Vf2v{oWPLeVFL5qT zEmQ4@dy@AN9v+QpP^r7jv}cI~(K{%sG!vtUrXEK^f-X|f#TV!MTJBeH-6u7D2$B6& z?29@|$6Y)yVT{@Wau8t~Q3NIMu({;T;NMJ`!&=q3qVc#JuksOIbfHlN@8YWN%-wt+#4)a-wwJ%O^7s?lbC5VM-&NJt8ImF5ptBTrUb9&2L8AZfPAVC2~QRa^QG zeVuoMr@Z`768m=x$|hgqyBS4_lFMv`*G~9lhxVbDHNQHzr5P9GfkAv|D@gLs#eV;d zkV79nJ-Ea%E}v)G-o@h8QiE<8GJ3X&UpvsN5PjMI{T4w1Wg+MmZaNw#it&IaKG@_676c^7N1v_A?Uc9ud}p;CAAuBW5#Ssh zgv2rXhLr}Jk3MtKdXD!H^fcddK2|v0{kL+eYxucMAT}blQF#N#dMcJ%KwNBQm2Sw^ ztwT>1{+6b{0;t=_MDJy`wh>rahy0B|R45vweqDpE1c|h;dKjSqsVnhiwHv>SV^fCi zut&)F+Vg$TXAkE1uO~wDuJ2C(56O3 zb5!Vk=NTvHH6Jl@K!hO(_d>Qj%b`{-CGlG1!M!WHPx9PwNxZTDAx@ZHKCTfcR`zmFeluoWRaHoo1=ESZeAU{_Q0b3KH1-9kDbqU4L6G` z*e_gQ`KR$WppQuD-`WYd8X`@Qy1b2}K3uNV(oZ>|b|J2#(dFqN%|WHH*;(#kGUV~| zbgx{c@PYmpJa89v!q-yfE>%{&cm+k`e_!njG;!~Kj~)5V=A5hQ}SFX%1U4YvEsqL8=W@e$lYTk zQCE9F?arI!1S36GsXZg|RqlaLj}u@kq0{z2tPE1a%=vcufsqk<1hed5)Xmp7|x zn#*T#R|&JVFpkt^@I#rf7h`SrBLyOBjc(4$v<&$FX*I$sp@2hMIC zKXb=Mr~|$SG))i9=Wn1x?7L8g#(#2rzx3k04I3f) z24u96K6oaa6}nND@@o|NzL>L7aiQKa3}3|2SD{hDTt&kI#`MvKwD#Lq8*+RiX5d$6 zeaK9`g}?LQkN$9QKUuIpjwLpC8f?Ig-~S20BBcHTa4Id*zAVv3)blScEuAhYiBGYZ z=273|yI;;P4p{Uhnu15{M(kF~Luq%|k===DAVo>%W4_qPA%ywCYR;E3j#*$OoLRvq zj`g(pmRBABi)U&n^Kp{q&3LMfkq^20p{(bl!PN*!8+APequHf-1yo@v{LV(oK9(~vTv@aXbS6O#Y6gqkFUQfJz)JIjNJm-% z&MgU0GJ|-%)uEOnFKr}9O%{lBe;FXf+5vXCWQ;q&fC2-w96EzY!0sP0bSqPftJUxzjx0a_bl{9mo3E{o=F)Pd zrMS$Ioks}dyO0KP8u%fV@J`f3DpTy91Qp)}-@&q^1cVcij&qO#-D8yL()D|7<$KnQ zb%V~^q3))c?I)j@>}%#sd8e|i^NCqxmWtj$r}j7u5(;8WK!T_s>ODj!wE3fMsJ0Xf z$@cyF#l@k0!JDwlqW0uz?HxO}^4;E(Z2Db`xk^g{s|7oX8OExAiSgqW0emz<2JJ;L z&GGSPq6FuTJ#D-7FkW>u~$ysYAS@7 zl0ESEP8|k_!HEF7)d?7S@LO4cxl0Npx~y5@1Xj2?!zGOn{fwbsM+);DvD45@RC5zg z$~94E4?bS&y1z{ZUI7G3mnXz#WEQL0dftGLDcj)+D}mwODV@9hW59Sawnt?)$@aHI7^l!;ebn zCbCqSnKStK7S+!fr}AEc@2k4JvM>F(+wKXU_U`oZx>sWK390PfFbXg zT^RO5mKO*sPgjlPDsjt*0p!?KNRqCY_i}0sE!QgTx+mN`cKBRv@?{s7GLv0Sk;yJ0 z7O8D*ShSgd9n2W)U~Nfc3H*ZAKA@Y`!ttvOD`+ zbDCb}-V~?T{=Z81#p8c4Q%lYb^Pev0vzdt6ox#WE%WuJO)4u01U*1dXE#2?0`2Qlf-Q1mf<{*LKqnM(KVtBA-onT zk1qUCZ-;R6!IsNM=N+oQagN9KI_JLPXZA9(FM}gLYSiRMA&-xIZ~tf(^s;B>v)>U< z|K#2y_MBQzNJ{HI!tF9sdEKcCUybC*5GfgvsOF&Q0g_(_d3&wG0hxxlj`gJQ#oH{x zdnH_oYF)fJ;Q(3dj`h!b-zZTNfIN&mQ85Dvsj%gL5Nm9!3)}J*5FsJYed}xqvrauh z-mLc~F*lOrOex4&mkI0@zWP}&njAv_5cGMi;ecQCpECA{mu3m5L{zX?F;9)FE;vmJ94ls4JMbL3sh;jkn5 z|M`wpiX{Q;E-2m7x{YVc)+TXfYrForN##MWg19`W?-#+LkSwj9j-;_BX5^==?-WFM zvs@rmxPQO!jK4_t^Of`Qswt|b@f5f7vd1F!oL!522`XEgchXaVFPexI!nGNI>U$R# zjNmjm+VyztQ0>;m1I=qP=FP$B)*8M0Qq1EsblzgRkAIdtOk7{bz1p0)^<83e8?X?WM8`GltuLkdn|F3lr1nYTy$>_3MiRJj3x|p z97pa1x)zTP?->FLl?Hjqbq81Y)-rAR%_rJq39U)2{a~5xK(dsIITHTZt=RTD1DI%3 zXOcuuk?{-Cyv)VkqBm4jQ^?mDanW5G(gRrq69WO&e>(A(*aiul4pvvale4^+%2u6Cv%W;HZZ zYQp&)>hI$$9s?ja{J&JW24$2+;v@_b+e{=c#{;L3=l_il9sggbT0p*n)dET=?M@_H zS`WunP9cKIs(<8<>hc;S#<)xQv5+m?6Uoal1r4)`{@FvWN$SBXqVer`KF(o+6@@H_ zHs}EHv@5an3DZc+(sB1~fH}4xIknO3OtQ5;(IT09X%P98Ts68FLqPzFo(Gs)KfKa6zF6x!~- ztumhoH-ZKmQGh(40}`&D5gP~Qy(__tlAb`-j?%w4zgp)zJZbDD+&RG9d>{@LqO@bz z;TEuHfbOq`2bln3VbBF?p+lLR6w{t2Z=MVF;oTf zdEt_YvrXrUzr`4=opuhmSozMnV5{Uywgx1+nbpAXYIWVo(xI;pcijaM!iSI^!_tSm z-OwptH+gbwjJii&j>6xLkejUJHAg9L;lEktEcWAWEXR(U2>8U1RjVC9jjlzOTZ+57 z9Pg_2$xu%Tp$F7{qKUP?Ty9(L~oO@KLVXZoibkcA%0-e;=q7e>>TGK&~Pk&k( zHqEBon(Vz&eWcDO{DWbKbg)eOgQlIWijR3Wg7O1!j(qYAY9~vWY0PpjL(AE{84UQ! 
zD#~yFX@8zMr0dytK}Fc~GVNV>VkX~E<1*_=Qje=43&jQz>@K1RDyzf3jun+B7b0#U ze)xLB9%UV=TN2y(UKp11V1HX9-vg2>4M=bwwRB)ysRvgl*0~(ta%i+gAAQ*P4dbG( z&U}(rKFOkZcXRqEqAJmF!YSWmcfAmW)BT0U9VdbvJ&@~!aikA3Rp|Jh<%u5Z_vjfb zl!bJoYQi)dzgTW0ag~PR7TnumCgT`CbY`jN+XJb0-LYFqyxgOMNKpQMaycF^A4H5K zO5;3WNhCHJZ_kpM(I5zQymUUhGG*x>;HqNxWhq#xm3kns`rG#azZvBCgcCi9BYmmI z^(eOx7%0HxoPxHa1SglfqC?w^HV+52tu&%)&;XH&KsZW#bs`6kKfc-dqj3KU9R(b1GM)j)cU1NBBLKvp=-- zIG(b@tvzfJ(SsAjh_EkW$AGXsid4@u8#3DCrzpt+;UU2-nJ-u=%jbz%TIY}Z-Of}R zu9+cvG$MD3k)H0^a%7XD=#aD6LdzeDRo3g7*4!tC!fY64XEh5(U$FjM>vHHFb^eEo zU&!8--(Q=Oi*Bwwv?3ptzjjSj-!3>CX&E zkQaEgrQ96&Vh;KifI(a(43_%#!;#R*_hhC5*EsIdERvlstnc>ntN zzf;N=D*#N&b?ia37!wC3X%5sw+zF{q_IgkTM4kN%veMpPu( zzA=kMCK+@d6_w1#xnI4qxbIj#?48)R#4U!QfeZvirbA2^?cN!3_U(FlMj?shObd(> zVwe{30ce9W3ep^{K`*x|^0A628Us8Wt3gdevpnDZFGHTsrw>k%&X=^^PuXdc-0HD; zQ}Jd#QXKEfcaB5|WIaWffO-cah`+{B$cM;kfgZ6)BHaQosS#*mfa;2p^lq!X?Ry$^Hk~dpRIyl_QCojZCr5IQPm@K*(8lf3(j?o|A}13s z4-YT#Hh+KrO%D^*3r_A(I>T!%$!`N7UKJA5o9edd1lBi!@liD&#ncAh1=tV3B*10oLRN8NHKD@G z0<7HiR>QFNJPjv0BFxg0&~bRh=&^wza|#pXxos-In1)7C~O+&97&m5Z$5^N^9smd-dg&pwY2HX z=2r)=8Wz9vKOUHH3*I`9Da`g8m+EZbD8@PR6qBW|GzsXH4Yz zFYqX+zuG{CiZ4S#WVSF%4kY906K1TKnLZJCmy!73%*OzXE>XW)?{oC+7_G%mHL2MT z6V6#FT4#P1G&?Q;2aYMTqO#17SnHf?@UcSoc)u~#X0krZG(YY;skSGpfAhOTMH#^( zZTmy7*)2&wVETCAXANVk*fpq0LgpT%|0Lq>__j z%ARB@J7uzmBunMJG6Zr5}+jAKVoU72|IAAD3cd?9UzR#GT z&|6`PQ=y_)vOMA7>@WI(o&6i%Ci!I_Vy9R{dHSiwuerP3NC{m=UcED_>J$2%>ezA$ z23B?ozk;P$ElRomvO?{`feaMxg>7jyv8Mgu-jwYa>9h7a!|A* z=b^_@a_aQ&uM`yMx180ilsj}kYpI0o=;JU51mP zy}d=%5HVD6Z<}^jJE`HY%Vdi8f!X$^zQJ|TOJJbkxVHkG-NY^hw>{O_$h0~fs9Lb({?TIJL%+?KX@JFfVaDd5w4u#aGMiD*|a1C1FVUEEf z0)sV^xLRV zI2bNcL>*PP6~_h)+eROJ+5UC>1izclnkiRaYEgYsVQ5pn){=Y7W$n5jj2!bqj2w!t zf9c=j8)YvT!K1XnV#}HXJwUYrd(HzONcDGdpq~1im;#LmreiWOv;*J^w@9c%Gey2Z zY*lae@)U5c*x(6j0VWU!BrtR%ZX1lwjdw61S6>9i@5 z)`D)fo+*Yac(XSPZQG@V@U>B-aN+~a%bU)WT3*${ZDVJ8<&NlBD?fd6%h~kQ*S)uR z)ufkZKRT-T`Y{{t;Q9m%JNQ=GJ$mOxe`SuGGCe0sO5pekZVw(YC!uB{toj|0_pd=Z z9r`!Y6UHu6`CqDcj#wh{ue8*kFMXXv?uO)e4P9~t9!m5m)syU5#cpF ztlPFdgGfVpD(IM1C4Qd$^#J`k_k{z2Ux5CYlg^jr0hoR^(-P`FPj>{9V7{?8x0_N; zk7C~P%&)Y8F;Rk!KPNnLBHb4)j#i!!!=&98rFmd3Rk zJn4I5U&US|Oy-el$xIaq!mL6xM)w0;$JhtY0P`0N4^f3+b&soKx@;lplou!z3bJ$t z*xN^DmpQ*UbCAN7k4HWH z{7rausr~zl^IiTBZv7zCBl5f*RXU0lUDvRAm@Ex272VE^=#G;(OZo)0F8qai)=1Q| zo)6fqE-%Mgs&kwy6zoq-7HwWCr+RhZSArrr z(Q8+h=b@7UGJ4~0w)#HW7}`WNi}Js+J8SH+ce!Ih$W`V+sG%MlyH57i3lm5jR*zYi ziQ|HW)36G%birGcq!k@N3%B%{J~&*J(VNy|AL#9HF7LDy)<1ri2yavb_vOU)_Z*eh ziKuvCqzlM=pTC!HdiNmM$G|1IR$5Pei@T%ab^bf18?73Z!tIan2FN!_ECZ!AK`&58 zJM@Lj;1CVYZhjmMj_5dRJsgvTug0u|youos+TKOzWd-A?8iNkIkWX!`x+1$xEYYQL z$20@tt)gF(H#kmARFkg|_{#O*gD+Om6RNh_MxWcIyz8EA$H|AJ0&UChQZ-w*`eLYC zUXQzEGJKQRNo)W?K~VS9}Zq-|h zPp|492ysRr6ae&PBy_wv2D79f#z9JN#$@eiYrmIWU(TWX5(fGK`U8*}N?%v=`qg#s z3?6a~S)l;8+ZGcJyVE9-fKRoj<@NVhX5w9-xmKwDOxlR1P2?1*5ZA;-~!!)T0l7D_64OSn=ad zj+12IdjeJ2prSj$l=;@omWpRvEp#>B@5-kd7SHrUb1}!YpTLO2?LYp=`Bh_PRC^}a zmvc$e`yD6Eflb*3#P2jtn_Uc88dssJNregpIdE0QtU}(SgYenP^;xz@)4MD%Z;AsY zh+;R5JB2=l&_s{a3A_Sm2{145BLPcd2m)89#RgMPWUY?iAUja_M(CMJW(B%uze_32 z& z!!H<`H5pci!QGvKh4z=1!`tX*tImK4*)j({wjPGuyd~<5Zz)<)u5HRM>080CUhy(| zopT9|#LB4}Kr>YUmgaN7`+9tRDUuFUxE}aBl_2>5MsAiVYJ^YZN?;!KV_$f6_; zqA6*Bo)LA0OtXeAIRIyd1grWZsT3^lAI?RrAWNZ-kzmE)Dsp%4UOHy1f<9Vx z!Lk-8yq-SnR)d)Rb*JhdyN<4Yc;oz+1Pg7=!zYi5txX897jZ&&plJsQU6!@85o3>O z*0tzmEKQP#CdmWK8i~~+pdQEvIJk@ZcCn1uHJ5Gj;a*3u@c)( z;M-`8Ydl7}P|t*_Vtdr9ugZ9ruWgNB#NH}u#Eu#&bPTm?@e*}4E6^9^Up^kcFbQ4K zC-YHYJ?g#KOS2lJ2%J)I=8lkbua3fHd!qQUHKJL}HCC)}qsf-r`=st)`}FcyNqTv> z)+wM$c>lZq%=x$a%n+#ara?oil1=8gp5_84F_Tw)R%QXx?8gz+G3#-s(Wyd9|855; 
zDRtuSzp0E`x-IY-dlLZe_8sB)VpufmZbYpXQ^Q1!!{yKSCoJ8;d58-zp#sock?sq@ zdU=RlIs`0V2B^)XG2+>%&U-8k*nk#6@e6mZfVf%esW95YEAeBO=bcR>j9L#a-DG>- zt1rU#I!mt++MQxM&ll4Sa;`x?_XF~#POkd^2rb_aSo7Ya4zaJ9aM9|beQRRFJ8i-Z zhnUu;I!|&YLy}SxL|HjSf>czj`fTD;&f2?SLNK6mq~*r?3~u}7os%n;?o^qWRFg7R zt?j}sYHZpN%lpRhqpOKs>}J3jFOAe=@(r$@WVzFOsl@OXnCvdK>-12OV$GjFrRm?IHbJ|u&v99{u1HAhKU|UN z{oG1}uTJF6)0yn+4P~aM)W=b=5b!xGw3sTA(5ylM=A|`Fl>z`BJLr?JfvL_UN;!#9M3)6-|y46rW;}ia&z%ln_Mu1hT5RPr&yh5exipnG%rUucLmyK5nuC zr^(L6txV6&$FGDN+A!t$!mp2GZC>vp%b*ih-&Cv4>Drqn_mK;_P`-|(3?HBqE1M{O z*UF_^an32b6!-Mt^W0alAKgS3*RF4AI|H)UwU*vrbA6snXnf3j5d->B)jm@|KWYWH z091c?`Q@|P+GifE z544|Q)C%HpOR>XbSl=13!-7nGS4|G|xrW$j8%}tZFcwX7t1Ci@A;P##w4!ih%X2xg zkGw9}JV}4sDRqh{=Din94+Cmvpy>j+ip0sctPTnShQJ^~m&89};iYMFC(c7Wo|Lrt z&CT%r=Wl2uCRx{rNU zJj&?1a;L{hv#Unnu4aYUvjCg0pqc%%6M!&Q7vGHHdEr9@6-_%zh)~rbP?wO!KY=ow zOtISreKq$GWgZ=SYh8$+lCM)uygcYJMq(NutquVl=zN}w(zM!Hiht<8_o#nFYsK@f#U7|Mh zEq!Mt|Kyj6^urMQZa^e-L})wgF*$Fsh(_SJS_Yy|q@> zIcc|NWBko8PI0b*`;~6AN+_Lmd-NxV3vlgUAbp$y%M(b9L!chPR(t*%)yh2XWe^!^ zblV;mXO)1>Z^Ex_VXXyVjn10nwfDwm@d< z3swhmQBjMg*^qA0R|-t={yS=9Op?c24=`-n0~*-H}|6d#X4aum5*ACW2+NZ9I=-FSAx%>f9l7L!i>;cXflU%a^u&GqT~?uf*Kh zX&W= zQDe|?($bAe?{PjfrfWXkA>pVxR}sN&mx%MJYw0Q;mJPMLechJ@8aaQ|3Ihx*6R^_w zL}i&esYkMs0m=b!UVP42ykH6KN^^){cDC7#ok|69(hBB^B9rU+_BRPX>p`b)r~!~^ zzbX=7?7P7Z?H!-vnwlEYO8OqaX8AH-_>a_Z;kEJPXIcUml)MSj-C5WLif|~uMV3z;aIkdkk zc))N;VbP1Cn{Oxe?nv9Gre17lsN!B7wC4I|Onn_oxS8<)21#LVK$*G@$iXJaCn*5E z1Y4?gO-s*K=t~cIcHP}aBME>i;fzY-26LIGMN{#gjti>rZF+4F9ehtk|9pD*++CD< zP%xvK4WfTa>IKyrRJSUHCuZXd49?i_tXuZIAl2X{D;|s8A!}5`9J`A@X>fbIQ+`L? zM-odon(-Ch%#>s;YX_gbk15iBnaY4KV1blr5!&t`IqiPzB2C&u10cQ#_tb%Y2+j(-1+5&rFLe}WI5&h1lSmk|?{ zyQ|?kud;9`Xv0QBT*#Urz}5-?FUw!$v7g|1o%P67sP3t%{hz&ZZAH&-&3G2%n~KRh zv*J~Pz-KFE-l8Mt;|5>QidZ@01GY>RfJy}s3!5L-sxT3NPgAj|WjN#3jrsDTd#;&! zWR7&udwP0nw<_D2Wk;4baOGam0>=LcOi-=uGFDH`y!I)d$(=acRjH?+FU77e?H-mY zx103seKiPTYlqBIyMkOMH3F zpb_gYRoTRQSI$x~bi03*U~MkT9B_p-4)TO!Dj*4#VLF^?5r6q*IMG9VJZ}^C(qnfI zuH=eqd-y+l~#`=0E{WoCN8lK(q;$E3OX{^9I84m z+UI+)*{NM3QnJGC(?ey=Ndk$)%>k=)2Qz%`3=A_M_&l|F#bH;AwI#(Z`HjJb!`JjF zHycINOE13`s1|(RiF1J^X^w+{Mcnc--VnC7q=(;r^3q4|o|@k=%UXk!h8*EnpBqDs zWH$0lCV*2g%PuiliGV}8;srmT&-E(ylk-xLrXvZDu&(P{pXo=qpNjj?Q*~T3;_FcH zy{7XAj&0<&n;3GEVKNax5To6ape3+xyDXdI+FnBJnn45J)&NYz9l(qM`%q8NlHz+X zy`M8J^S7RJw2OJ9?R?&K-|N({Qx7jvWWJUw|LUP7?BwuCNg3;;!Nfg9#n&g*ZH_g_ zeij+nI2P}ASKc(^myTsh#&$9)u{P4!P&@LDZaiU*~WeVGVN zmwzH1Fy2Ka3Brw4MaXJ;NLbYL)0s=7;90KQTosyn(3 z0|1#XrjvFUHwF`<9N)dZtEBe5G$k%BbjvjH4ZzwJRY34M$+7pK-v%`Xgl4%t%_Bk= zBM)1<7ANeW8(gjGIfOm5@r?wJq=>c|De#Sj-$vatTq{0*B#wQ5#Yy(CHg))ug;{#Q z|DcXr))<_a<2qQ{w()5GJ-gBqCv;qlt)A$HCB2>D*LS)tV zec)+>2ZhCj(>21c6Bh(} zut0zsx*ci&T~r>R^yCVvb*)m5r@f19Di)5+A$n{V5SnS1 z;7*qCFk)xQ1dzdQU0NM-kMcmM<;!oBpy;`l|9J?=Es`&>+qw!2TOPX%rZL&lUFx3| z&1&#`dicS{hh zS+XCXX}2&48q-=Jp7L1nyfCLPyV|l8&6Yur15nPDM8FC;iylz=fI?zWjEjIn&IW?n z(C+sAj8;K2NDoaH{tC{RMJ#hWL|sQ>;TQVTV!>WKiU1b*kY)M7lq2OJ_ zIW81&)213^Irt-6IS>P zzIjhDwU-AG0mq?kbk-py)@RK}%c=bfyb9hN8G8f-P4!qlayW;9+SVX0skdG ztL?dvgRsdqbx;ib^)>;bT$serWIuYLL-!j1t=VFYjM-w=4;XAEh3jEckz2C8T!B0B_72Cg>=aGBOG{C} zwnW}7mxNk~IN*I$9zr43#F40|>kfr(C9JpL`0gR|lRkHrZ-b~BLUUXyoO7VVDT$`K z!r#D=&PTHnFm2*u2fwcp`V@#Lo`7Az0e(pz1z))*G@}^{U%f{SWXpW7BeGN;g3MGY z@`tRH=3-AsheQ`OsGOM9n~250)}|nMIS2i?TIY0)xkN~n=}gl6+XN^0$Hhn)8`Sc? 
z%Anp%u*{lhWk^i$U?(E-4&;elP(Lo_&uM`toO*|NKm+Ui!pPHDvI2>A2ZU2MIVX34 zJ=w?%kC+0b@_Ek>B=3jSpgs0>9(}s#yX1L~kN$DBQE)t?O{fo9$vDGzkPpE65m{oQ zRIPDKdM4H38O@q;9_Gm+2!EDPP`lPUieJ-dpX&VP$dQaK12?zy$w^hJFAwxW2eOa0 znCs^7{6mF|_y3tf1|e1>SIc-+PR}8W)<%u6O2sveI#RE>mb3%1H$gNd6v`W_1z#xl z+g4*9rWq{%px6c4U+iL3ec?nW|FDu$q_5jsgId1AtyxOx_?Z^_bkE6{Z5vN`_GrdX zKWMHJs$$&eI_RwV+Gl*wDJ3RETx|KWF1|P5FltqhX}6(DfLwHE7Yxmy{U$}nT`@N0 zp#O#ORy`vn&5~jeg^03ta)J};2hjoJa+!8t<8>yI^&Tc!CqMcaz41uGWf1)0i@z@O zIyt#QoiSQL;4_h6ZMoxBt`LOX{mymR1nbpts^_l3X1B7218<#fHRG|Ql;+u(5LbjaU* z3(mkYmx0azAkngGLLURG&LUkHn%~6udio*MDjbR~Sxf4^HBcA%6?ZhVFK8>O;7h-3 zJ245NOVOZ5%y$I5p3c3V^emfJil zyDOFX&Bt7<^(i=&_J578Y5~H)^e>1fZtWbGx@*uq>j$p9+*qMC-tP13yQuVxOfrD-=w3lO_S$F@sw+9i(HJ z7z{V-C_Rm^WssEe>vaz=tPm_y8+<(rzW&hDr z1ROFy+Oh>Kr`~T~97w19Xebx7%Gqka`nTFYW%1wt=7c)VS0oEK6N$fhhzop4osEQo z*zq>z888^6T~1g`5UZt|E|}FpB(2=+j}o?@eo>Aw2R|VMvZDOwCn%s zH8dcL-})V~3(0`G0AT$r zMNNRb)x&m((x)XEG{I0}7r#J(fUNR%JI*>dmt_K9N^>e0ztuxN*_8BdpZ+eX0}U~) zEBSF5Iq0+X(PL{smD?Ub%`qhQEQXef2#+sAa6WTf6X4@v3bkv$S)rP=Smpqy!hIfK zFp7ZlU$Ke|=szqMx`=7*WGVIVuwYOeC8q;sO*B>E9eTKyjIgy}yRl=7_w|>XxVT@7 z=j$&d^!F!^7#^gHT*v{V^N1PzIwzX;%fw3T6i%DexzQgLiCXRNEb+W;>>N{?JK0Qj zs`Qw}H;10F&)JXj%Q6|j0r?9}mro6s4Z6dB&Z#AcFO@5VE8~&5xBaGf(WOdxd4+9b z15fj8HKl~z%u$uun|LVdGY>%WZ{Dc-MD>}31+MqVrv4dz^S9D5Kl>V_XR;df2oQ~J zK%s*V%{J_HoPLRE61svO!UB$;?xnGFT)%*tla`|Zvd&LFG)nD})NjK{xc(3PUNYkc zQl-Gu+}?$MM_?#_fo3&djv~AO1)%njeGeK-3~~d+F*L_eU=sfRP%W_~v)g`4r18wP z{r}|DSR$)7-Pr;X5;ye|)o*A{lccbPMIbFFC%WQl;TA-Kui^A(bFAfnVlb*rPt4Bc z#6O*g{2PhNznV8fBZ6hgl0Y21;J}?0E2iw>80si?3lBNjr)bqPp0fqJUX7{Gp32=# z7S7QJ?$+~-KVs(o+5ayrg$R(&o8We?Hwi`qov|9JywH(T+0z<=u9bM!q}mXp)721l zak}r2+1_nV7v7tm16-HdEgZ<)&&uWBdf&n_`-XhcBf=iwy@Z)Y z8O_<4d)cIi8@_CxRwQNYrV^@~2A=}_p3=YSm;O6H|DS3CZvUMMF*E?NMBETQVdh$- z-Nb;MNupeXL|BH@CZz%YDev-iaLAyQijOFB$6MEdrY8fpYhA7yEv>f4mAkDmvmFVI z5-ofM=>m#UntwA36d|}+1-6KRCBu;=e|C-wJ;%j`)&TXCn9k`ub4TN{=y+6qavH%5fF4kHl4g+G)z$0eJ}VLt$PE5aPt zMbsTod#N3rJ(zO+e4wEH&)$jLY#EbNtL>VQ_0F3gI8E*p1H};2>h$APW42Sv* z?ZDtpcc`Kh4a@;;+2ngP`~mgD6N8zZVrLtR&!o;|ARv0boC^&DCCD?B4be-{KP*E^ zPgEODI5lKBWi$W5&;7U(WUXu0tVwN%{d8~MgMXg47IMckHQA+VahqAbi`)E{a`R7} z=3LkiM8o&l<&ZbjbFTs_0HY_c1C9|d?EGl8W8NmBG@;8!;&j#g+5y2!Lh_bG%b0kl z^#@n3S+-`oo8O2<)CS*8|E@d==kE~TfDtYpEKvhc9N7n5p4E&Hiqr&sS^Tdq+#D_E zqq|>RH@K@kwP#0LgQ<^z_s0-Q+(n^19SQUF)S?pmFI~APUH=B@`oC){+9ge-IX<9X zaA0d22IfhZuDhe9WV0(-fc|1dsJTiA-zEkTnEzkRBMfkH+*!bnb#O&05)|~|3HoSl z8<-(5Om`SCB1i<~QjPQ1OY8|Ke3raVtXXx4T)<^!W`^lX{J#|iaw5_HWF+|hcj-j_ zJJtK*-}>uAbbexQmQ9j>2HQql4rH|l^m!_iJZ+gG{%E$O>$g89y>IVDnn(;AGCJ_KbTY-Rt-%beNcEKExWCegj@$?h% zA_*W`I4O|1sRAI#aGbTj!o7f11?}nXk%z@ZG&}jIE7d5W`ePc*I>0U(!33qQmi@E{ zL~x7*kDqTI4*{}v8HnIXd_P{(HWFRnQR$*ojjXY1UH9JZtM2HC+qI(-&e767L-Bi0 zrC1zEy>tm93srC8Gy!SNuH6fYcRO;S$;yjpQ}EBHAma+nZosbwclrV3dgW6QSs{ky z99K#vRfr|u4bgizi08do;WkOJF^HAoLCd z!za;zH9CrL1x*Wd2f#>xkqocmbmDw|1cUW>0x+xqk}-Nf3WC=ruvce6vMf;V6U`EN8X>#ZuF2Mx5*b`%@KoBmvUr zFtBXciV`sKo{x(S7MXWDu|M83cB%?l8v(Xh8fxJKzxh^yY{VV0#XVfy!#@kf;yGND zWDb z`9q6+VN=(Xa;2K_;P(i|3Rr0X>ds`y!P}U|aNom@7s8a6LZgvzh1)%fv}4FV~J2k-fp?e1Uy{+D+y@Qn*R!@lppf&KSb{^gj3l?_t-xAXh=?JH*a#~q7V|8_?X z)&rv(0#z@HNMQv!cV;7~6RytPK zCFVFxRz7(CN-PhrxP+vXw2G>lx`w8nzJcMP!$#)EEiA39ZBCqVa&~cbbNBGQ;CInK zAn?-FYY~xA(bsR>iHlE2OiE74%D$hIoA=;herZ{GMP=3FCr@A0H#9aix3so(_w@Gl z<6ggcJ2E;pJ~25pJwsR^eqQ{twEXoOWqsqvCiN$LG{IIY_fjgTJJBPx4PT`{tT<5|?6c60y7Cm;aq^^rc>7XN7?EIBsUU6kTf(qpq zOMl_$KgLk(e~P2OGxT?Uc4i>_Y%Jh}u?az75auiFeD-S?VFF%n&&a}X?m)hlPIT-* zrnKf^fbtr>bbuPT19{;*k7ww4OyYk)24mA>(H4+{XUoo34Ikm3QlHZJahlD5*MRqy z=>NrspQ^vwrd_Kjjp0@QU$CEa96qvZrsIAAhgk~bVwYu?eQAu;bBE_=mWw%qMT14R 
z)maKSWd0|A2zw{U6m*i@fy7ouvkPvkc(^hPH&d8LzHc*jAa};Giy9p)zvjIH^%d`i z`OF&w+xfukK+R`k*Hd+;tC(+G@1TAR2fzWweJFrN$9s43At4GT35EuJR=?yKXDt1J4)S&zHmvf3})(wGtqEp7KD z$(!szil3tJIXe(uvti=UHg6SU3r;TWylE;%ga%NS7#wGqbPV7?&6G!daE7hD-GRi5 zvrR8!BxzQeuw_m3&qY*sW<&G_Gxo_21nEjwe=a{{5sjNMxxNEA>a+vFYB5D`mEq7o z#O!J7Sb{|i^@djeABHOXS2XP9svq{*|Hr}pNW@vwq7=%%8te~5q_V91a83KeP*(qn z1~)t4|%R`Aq+TC zHoSSWD_o{?66$*jYFQC|M0eOi zG)BEN1@d3DYoRHI^gCRPYxYat99NmDJCM6)K{Ar*%SWz>I#@vdtM}6_JCMu)6d8tM1o+vno09f(gRy-$zp`7bf< zK=#@HvjSBA+cD_K|0c!I+F+xuFj_kdk%MSirZDn7x_+tWl_73U4X8JGvg^@}qK0d3 zdyKs6+7Pd`Lyx`-0A9<+7*Bp>piI$Z7eV=GEFqCDno2dJEX5#{ZV?B4PragedIm@b zf0+H`^HA?e**V!f{yN>bTc>Zo*DP%mF=gVIysk70gdp&QAjpd9GB@MJ^vLny zZWMpGh878ETZm3P>FsC{89PVxd(~bi)eYU7G!>!7ate0w@zo=4-oI-^YjaV1i6RFJxjAk zumZPf3bs!GR3}=5?in<%D;{kba%|z}$%p#8m71~1q0%lXHv{2=A&Ea@L|M$Gy~1Yq^$^X+AMPzU|FzXm`o#k(f}Ds z4PiV&3Vf@Pt0En)`D&C=rjm}!)ANxWJujVG{xZj0DJFI=;l%qANmgsj-17P7Bf-Ml zv9mKX{bH6#H)7h^D>oq;9z7@NnT$5r{8Jm6403diAK>$y?^y`Zq`n<>qh!?T?!wTO zW?V|19If#97`(5|zGER+Y(r_Px`p1VwJk-nMZojs1y{Zk(HT^6jc0~;e#kH`8Jp{7 z<-yL!SMYp^zg0pdUUtEGNi&bGqQZ0mwPun;8FM-xOH!c-zyyH;BF6yW+jChJT@AMo zxJOv#zY$Cs3JNl@{qnKi=3YZB%WMYrIg^F)M3GSko5!z-ApBcfJk!t8^(Pu?g4kpt z><*0B-YhbxWqWb`@jf#vk7l_BVLx;tjO<_!XiR1>*`ra^5->yc`=sSY_Q96Vet&LN z0u1UjCe{_Y9DVWLlbg+UG3z+;ta5te8dd~5|5Tm@ynOzFG-!g@`HV!hq#pyG0V7tyIMNyM0-oQ@wIZ=ysSfB!SN0v+a;six*(GUMc`t(8^Jv`mrYsVzB}*IkJfooM7#4mL#a=9cg(j)gUO3~ zqS!ZxuCtH$iHF{EjkG-=E!_`ImWufyg)syyb|A{x-DoNY$k&e|BPO`W1luP-Sxi)R zz(ye;odVjb#{7_QbRHowNk^~6Oa|!a_xuUEfkf@|@o~?+bPDyZc)a&FZf>`4Urdx! z(7HeLjUFp7vOguuS}ln)hxg8?k{Ep%)n$u>1;Xp`D>h__d9IaeVy5s%z;z)5TeU0% z^z=+#4Qktg(3PH_cc1BB5I;D0d3iwgqS7m0#}pQbV7A^@aF+1XRYbuacnXw!5}yc{ zG&#JGOEHa4Ox51>7KAh3O*Uu|KhJfPhTZ8~vRJp3t*-ag2_Fq|kl4?-yw}md;E8dI zq$q~J4)x3t3p$TWW?J;!6WAa@Kqs?zZTe+jhw9ifGg&+G!0}fU`J8iCwd=p-)}0d4 zX0Lm`-(2}c!Xs7wB;FSgnbe;_)@wO*{Zv-z!HE{61yeLcv8B99{M%k zasOcJJuSDf>b>q+-?@+QF{z*D+m(!^wbxeaUTh6fBuACT-sFx$Q<%rHu7s$bA*o9{ z5DC=$W4IjbQUN6bc4r6TKg3gYM=wBNv$?>v5Z0itAtu$dbbV)L9Lj$S zdtPu>i?9nzI!Z$Uz6(2$3^N^oh^!V&(lh1qp+OTD?qyCZ4A1om&ki&vS(=@mcVuZsnQ)Z)2IOm*P) zLP0j(Za?!bbhowBFE%P&7cz0FPq2RWz+S6vZ2IX_ z)FKIBtMn5f;g$AM`(;r6CB8y~$dCNCd+(OSoUPuMEXp>h7l)jB7CO)(z=iDY5MacD zM)1m<`hnb>*>3eXIua%T<%lMFk#AVZ4ZozF2pD}}(WR>1B()!{%P&z$gliyFTPI$?w4wloM0z$f15fp6 z57~iK44#_dU7E#`9du_!PBY?B7h6GNDU#3ZUB}fd&sa_QwHXAZyR2)SQG1}6$8gKO z?2R_qe{Z4#-;_7hoPqGYk$|hTKx&}6x}fVBL1>yR9l8Ul3cAF2Jc|7czq|dQ>$zIB72DM*3gM*KU z*Wtu~DB3NA00q;(2B>q96vD~5QWwFP1t|xx;p9cdferAUCa{|~uL}2Gw$5_veO}^N zb-?$sRBaWt2_RLWx~w9qp-J-W$;xE4Zs?snxJYfj22VP|ej)X6@o30{G#>G}Ipc>c zK74+j&`3;9fZfnC8`T(kpA_#+qJ~!D5NPL&VNF%H{3z%WEyW5Gw9@T~85IQn3DL_4bf%0;=8{O&SEfvj+oX20=tq1ZpD4)hAw& znI(^-eI`~t-5Rwjn{s6$asvu4h!5-h3=oaaIw}I)L_7lH7Tdj!2e$&sn~P;f(+)g~ zm2iA^Q)m#i7uwXE$rOynQh`(|3$RoLP`}R`cEdRW4cWSxJayyxf~1gR%kc@Or#$U` z3VWDediogA_etThR||0rdv=!^VG&2NC4!69#=Kx0Qy~CepP5eIWjY$jR2|(+*U{E; z-cWkc8vAzjXK)=~YGBS?D}~GF_dYX7lWpUB@Y8+@OAI58(jt)`V8Tp;WxVhT9ZG&l z$jj6hGuqNEL zZ1m#xb$LT6`{|2Lk~2qiV~%Z|ZHPmbGbeT+GHbAly3>g`Sc667ef@_fa%EnZ>ZaN& zKk3-D4CQ@^yA`t?7ktA7w=zt*f|_BYh62|qtKaRtRpGLLuN8%Fs=VXIOw*G)$gVRB+CLn8 zjC~5Mf4n?gtJIirahAXDVRj{6d{mCDG1`3c6u$?o<-2x~?oEMzMJE*9`SdccXi7K_kn%NfkgDGO{nYE3Lu zYR)n+Jk{5HA~NgBiTT=(BYPPtj5{b$jAE{(m#t5>X-_Z`BF#l=S1EvGp}v86BrQS%Q)zQXE$rlP_IT~B78p~xln znBLMYVUD!k96Yuvr*@*q&qi(LLhiA;IDY3xltLV4sj79XPU`dew9v+1`DsukF{UU?>Kpl2a+@pdZh6!k!d z`!~~EH+U#Y)EA68Gf>&OU^|eDu$Y;Ss;2;S0gl6n;O0@ggCtzS{HlAZW_Yqz7Zo3A zBrGf{Wqt|Xt*mmr3tEM$uc8Z!P!BS)nR`JEl7jiCdZT)7EXG~wL!O(|Rleh~k&|Z> z=lJ^Te#hCdb54i2q{IyBTw(Ua*g))9N$?H+u?u3xZf8@N0kDhk>1Kj1T|9e%s}DN^ 
zr_LI7Xp!*=?S4$hKI%;+p`9b=TM{}~y<|~iErGy!okAID>dBpJY59OTlZOJ@Pb=U}^FEMFpR&8(QlvT+8O=d1(5kt(Twa4O~g6ID=-U!&e(R_pDmIF&RkvK*(?b^s4}2a zRb>~*9lI}0b}LI~X9TCaNW7BDeose z1oR4nPIk=b1c%OvUH5(Mc>bPq)Bfdytq9*9_zN^?n0A&qf)-$c#pbSK3S{gZ#C~!t zJ;;db4qexzEg3t}ovE|^ZS`1*@TZvl$DU5_xsWD@ZyY=GMJ)48Bja}aQQ8USI9#a1 zh&g9;F&z^f{-f1IWqub2lr;P`-*g0Ay5_8vlf`sHXW29*dy^nx_F&;THjvK2sDfS0 zjEREPTamP`R|455$>~4x!{YR1-18TLFuOG4n-aF<)psBxijZpP1ET{OPi+{xF&vCC zCQBf`p{mCfI9CmzdwoQja=-8;>ze4I7zK!K&QFV5U9w5DWic#DEu{=r+&q*ngr!~{ z4~B(grb)L;w^F!z>|<{9W9RJ?fAXH_31igU(8Z8)ta}>;fur2T~tdu+N`dnHd%owby?!pPc?v*q`s^wRgC1Nb{LNWJt6wlV_B66mjqa zDi|GGwEN}A1u_(8fSqWU%Y0I%ds6%X88xcD@u$Y$57RO&wBj@K zQ0H~;_-Hc zq+9(0fX>0>k?)~A@^4|gfm|)ohLB@tIG!8)ynAZ)lf&yRrQmb+hs1s^ioze0J}jEB zmCi*6b>5Ij*wn@bj5d6?b&r&-1f4D_jH}FimH_MiGny10=Kn z%5R7RSiH6aNyhUuZc@+*?I)ktPtAU>d%wZWe18hrOK5L8WlaM@Pc!Vuw*9|G)fI{! zGXKzF7Oh1Z!fG4AKeh{pL}a!^N6_1IL_j|SOy0&-@&5Kcf+_7 z`ogL&Y!_%F9%g%(vmV(~f}9|v#4IG-RCa%WztuFcAXS*JUEreq_(;gs5Sf?{#@h4c zuoswg)Fo_;fjE>aw0N{b5rFm@*{8NEMP2)%bzxlZ$@sXMA4l8)OK*6X(&C}u5E#nA zq8rzVC);DD)qEHaQ5W42Je0d%x50Ge+>=Wk3Y46=2k@228#N-(w$Yf48Q(Z?)_$Z!)_=->Oio~eelQk604rg|yCciomsVcbK3NY1PwRe|i^H5mD`2TrcjTLY6}uZh}_&n=B3 zKBk(V+Xp$97w?N_1`6fC42~RvOnwjiZXxQm7cA;1#EUu{;rQGB$HLF|;y=J5+yb$` z3+I2G>)-?ZLnd;-M~6|3y+|a4#h>Z&Cl2ukdAKIiwXQ~J9Ssw&F8^3QoLngO;P_i9 zc)*a-SNIKwV;=d#~8HmZXKsgtyEK&4zV%Xo1{PQDK3(KI&w6=dvEU;YeQA z(I-LL2RuVrEst38#l3MzyNEr%3{KF-Y$y5!t`{Zlt6j+r4!>WVCs!sg$%!2Kp>jLpxb9hJP zw|g^}!hxN6>wd->6Kkpi6#j84>t+?#UZ_UgMTKtZmRwIC3iFErMEyT;~5|o_x zwL_|fT)r>)wA-zOFV|Wk_ix>N_mb#k_%P;>q|4un`m>MKRgVacbt z+hK%hr2N+?BY3O{lu-Q!Q__(8v@v|UWryjvn)|GvWgLAMTij1qj61+qJ(N?PNS*eXo0wc!Te&rFpIRgdH0l(p;L@3j(TROV;iF4|AKWgbRSKyte|Q!nSa@Vl zq-wj;yenMfl8JRmNrZZ4N=TKQ`dwh?eqNfBGcf|V)s@OrZD|> z4o+-**rL13Lex?6H()5sAL_T`e3<~fc?WVErHTEATlt%N`Sp%eLUp-=zP1qCU{CU0 zSmUO9GVV6MGVGbtzGFFo5Wx&RIdRUW{c!DZ^ZQ!-&qijg-H9iQ&CYElXi>kU1WwsO zc|nllMLIqSHD73^%t%FY^aKX(EfSEI81^lEXHb_^Vy(#Ex!=jz;e#-jFT4oL+765YGOzbjFp&)iq(@d?4jmKcvRLc4RAgTlWPc5?Q@zNkkoEGG6a&j4RVYl z43Wxcd%;!YI8qTg2CrWt70?cUWpbkX;ru41PdtNMq)D3}jXWqAZq+doXyRJ)EHvFk-3X5*2p2z^EtcD@3vA+4pMgs+87 z-5BaZngPxfC2w>u^#&?sZS~qLTJTcdmV3go;~CQzPmgzS)^|V(%{laa}M&bO7SFKtn+(nbG3YL?r=-)o6=XQug^X3&2aaSG>h_2 z7Ml$cE_>a2P{2%G75iS*yv{fc6n7(@?9ULjxZ8B4j^Z+^@%Ykf+EErhY$v?GXjlTFwsDxi5z}R zSzoQEl2+e~;P=6PpBn2Q?h|`%Jrjc9qzKZ*WqNlYBJKL;XjVWNp(-AsRPhi+P-q{< ztm&>bhJAX`N0rRixAogFc)3+&9L?5jr4?Js?1CCN+Syme-SUTl zn-qJ`?B3=M35wXqZRhvt)gP<2otpBv+57?55ZQx6L#LpktR}Fu5-{Eo{U`W$L zzK4_Dpwm!7Vcv=}x%mum{V^bTbmN)HVIs+{7VkXre-TftZ#`3_i3*e4Yw_+7BdxfZ+LCb-r#?9BR?y}F81i}+>A>k7)Ppb z3U2>EebKV4Z$NFS??X+N?LhY1v8?DhSnPql6JZLv>_F~U0$j{DE;(R1;?{ZWT}>ty zHNP3Qbs{VVwUQGE-;SEl+F~sh+!^~Hm7<;x=n%H~anq(_^1xcc!P{y4kEvB3EG}tX zW$qr^fuP@D?a5{TI4nOXkn$0Xcl2RP>tUmGFz5RFA=|(A$wp>6Ud)UYIq1WOUeG0R z5lMz!9%3z9YZ28`!6id}F86D{9O{0$m^{xvNf=53o%Fo!ZIn>N6S^9x7VCgJMU9&N z>UNWy`Oy))?FZ2xj4vfOrMG-dLiYumKA<>DHkXR6ppG`UZsh2a_JfcC8^Sxef*jVJ znE>amd9WzaJM&|E@Y|D3=^HjIN~2fCs71eezjq+thLa<(8F2oFyd6kUq>pG3!K&%2 z&TeMThv&{k!%8zftqchV`1>_F7<>V`f(%*L7#KMf#k4JBih&WFL?Saos}l-XfZD4v zx*d;N6bunq`e9*#GRBfIARFzlAK^X_5IWc1fo$ny0Nf+TV5OQ`Tc5zbY!v9W!I-ivh0Mhi3fqCaxrXWnZyyWdox^?6APm9{u5zE-%u3)} z=CkIl459xheZ!l)Af_unBR901lEREa5#ZqR5{+73Xa`+(90;DTdqaOm^kY}%Y3hen z)Nv=*jhA3s%MOIe*Run$5}OyKE8;-_2So9HL-nm=*!9Gw#H-tirPxCJZ#e&FnSKLh zdf&9c7(FZ|MqdS`_8Y!oza#rJhF(G{`N&Lqj9H5M=b0J)E!F=`(-k3G1cFwv)EV7E zKiUzxaa;JXD*0Mc6~(hOU_1S<8e(UGI`jc?3hW9@Hj@eKc+~(LrWnn^wxSY3%+1@f zr>~^%hPgwi)%U;f}$lo*57Gy9wvp zlKf+Q_JB3<78qfd{T9982PmuVz6aM_mgovkb|A29B${CLkHaI9|Mn*l|0rPBZv4Y( z)WZAbq&($R{3H%^_@!T-!l`|=<8HMGMRiM~H?|DFGDZJ!lyGj?6HQL2qBHHbQSs2f 
zA3^?G&Le$6@ty`t;5ApNQUIKwtu{~1EjkF)Q_l3}`UsI>i5MM@SFa{YB6|>%0{h;C zgg^(i(jX^ToeYj>O)+Fkv8)X{kOV0Cat-6@To7EEDe^V%z!hp!heTUjf-cq3T{ZcU zpks=CaW$U&1U5e}xUCE416{yfum|z;)678mCD;ULJ^N`x7D#?ncR8pJF|g%V2whP3 zj}bxBdJLMD4d~AxsNov`Xs>bcIMCBLkd!`}HCt_E&fU-=`!dDU=(3;*TYM*=n8FfJ zdX5!c(;hg)#!Q{;27QdO4s1&k9E-5o278Cx7>3`0{)QMB!4cKf3H{;IA+ZA?D1?KK z$LXf^Pr;gDrti<96gucHe@6+ZE!j)~(8HB(l0j?b;mc&6D4VfrxfPnZ0d4=`I*RdP9x#?lv7Emn<+sA2-dlxwmeFx*reXl-^?yUj zZ)DSud3pO2^b73zGNvo@pDFpBY)s26$hTTu8Yup@u%=s{rS(A-jmm))GBHi>CdJJ&j@MRw!Jsc!^viz2(Wqzc33B%9;ngf!Amvl$byWl8Zg#6 zwcUr>Q_^bqzS8#z36OSTW3G(-qMXT~i&pHAFL@oc!lQK5^ zdYIMNV7U=u^)0q*90*BQ^*kZW zK0FXtYx4EMx!l>+Vj}O$IBbiDd}1l!YtcwvN+2nW{-8S^qY^fs3#~IG**sNCqk9s#+f9jl zQ5!WTX9#&l28|(yzG`~Stf=6s6MkYI{eWkHvIa3%z0#$A=p{xil6~f?$#CgWB12&O zfbH4y+Ln4gujJ|`#JR%Qo*_*Qp}2yNcE=JVUrr3 zcgwUK**7=k<@-RV<`K`H98}x4gX32YW1dxSMKh{F%)3K#l&OU!O@b2qIB^c}ru?+` z-gvRAO(Ny2kH?TpqnFb42i@->5r;lKQnK211}_@|U2_GXOC>*_=n$bFA65avDLtJx z&r(pgH#z(Is>vLK)v~ur-=DlATm3=o)3|N4TiV{ouOf{V9BcxRwZj%-?*hd7qJJ`v zB=11H7nl(n)L}W7dcs!)bH$LdB*Il#e&xULhZ08|q6Mc9fiMTVLR zxHM%hKzZFfKTd1YErZUldmrTyNO^5NwNxe$?juO~&g9jOBI{E313?slA-mK*m}JM^ z03-8gcz3SCoo_}RWr3o#U7SBNM#Qg-9DeQ0ay7q^b0WUK;L)< z7eH@PIY1=g5;wg?7Q`RVNm4$|6;=-LPJcJ7OMGE4B9m3ug+;)O>dqH`UgO2uqPk9C zFJXn5{qVZA@RWngH;tXmDKB4d@i`OcnPnRaQ@g8=XZv#vg&{6d zZ^9R?J7a~&y;nFjtI|N@j6n+^!~!YM1;O-d+JZ;Nv~$--m`o;Lw9eUU8)5{VfiIl) z4uctS#>?}E1U%8RaBl^<2c+v5c$_>}$b=P0rJ zr)=y%#F(R~djH!9l`!I(6yg|pBCll5YtTOo75;RlqG8hDoo~MS=K~?v6(h71HB`72 zV*14S4t`XBW@p^nAv0lO3QcWDx19KHlP}n4b&LNsb&&JZ+}RwPV+Geg3hei>nupG0 zQ!fJ53tLPmVjqx5!0i4$G3WoVDzL4haqxrC<~X!?UcRwbjfh zM(zh&Nq(iT4Ih~UZ5R^UO)`v)`l(4s#uuXi}!Dfpq7t%2Ap0)C`Xx$Z1?R77D zWq|-!7LHwv1`}f%4|Bjr9Uxtq@m_sdF=cnPY&k!E+wINMA%RcB=8xVeobJ3Qyq`_h zHJ3RBZ8VHG0nr8aC9v_plX{slR~S*dY$lquK`N3OFKtDiY|Awr{$lIScZ_cEj-(cd zGj}TflyW-sQ|9OitqmO2kx|>R>nrRwd6N z)$NV?DekW2qdvvA2LOL+5hK4nkhp~ujW%A8NN9wMbiYq8pZ?Y~>a&(4{7u1zFDpiF zp1)Qg{LYDYUYNR7Cs!FF7R=$z#DQCMcM1$XiZHCOJl6Q7I!Y|3#W#lQ;X%dESrDmb zsCcwghY%v1jHjzYIZf;g20I-N2$g{x3QWPA3v_p&+dgbN20aUiz)eZaCMFiM)(2A6j!kOqAo$HZeVpx2UF30l&$3A+@`+vAV9MxwCO>XoMhqm0z=XDubcuCY(iyD;q_^ds_ z3VVmie+*n=jlh?G?$G?to@-~WF;6(78P~x?xrFxz?*d(+SDqKxv`s!HC%;0yN=RMo9x6&}b$_RoeTLu|0;u&^tw}9bQLe zd8edFbgRXzFSklc9C?d*EsxdQz|lnqdh^g^C+O^eEvP$g(UD)ykP`{#r=u|oA9LS1 z>&r|idLKOQFH4MXc_2ubeH|gmxKdYFH?neeY zztm&p#2AooNJYQ_ z{63Wd1{sY^6(G_16&V||;$qM-_Staz{+pK<_OtcaY8~9`pz9c!HgWn%#NdYzsQWPd zK2i>pOl)L!XTYg+?&yK%iEwgqL zwmhWR;r**;CMawM1!BH*F(@H1rk#sC)SKUzo||1;V5f=mY;J0#9FSa4owxKlo*MWF zRh~|Ju{%nsTC`TSYHbtohN4An0PM+kh)Z{{{+OiF9mvTslPLZwtFgrrp=0_$#3XRC9Gdz0|$Z z9{DUY1E?;AL#Gl2b-x~9J#-aG(QeYAf;0_H(KM0TZ%hxq`5WF5f_Y<*QHR6Kg2yI<-Hg}bpWf&1FM(6u)G9HQ!K zn2Jl7j%15#*Pax|N4_n}NWw5YJqk~DrJ-aHds2x*D`jN=KD4NbO5laC##bja_KeJr zx+vpKw7zVmU%d+HUnV$K0bX}4ZuM0lCz+g-u$8GDQRA&9HWA zM<+LTofSb*1j3h9wFXjO}ZgmoD$9lg*(8NnS-E)2_2H(#1iYREmarW1V+TSTFdn?2#|0cIOrUwORPm%;Flk?af zLDoPhpNVNrMQ#~Ud-u@7E+t1>)Aw1{mJjQcZ64Of{$#u#?}@~;fn|gcDW*7bP+pNy zg$jnU!4WF;A5<@dXnvvMonAgVxc72W$H2Nok&6&c{%X-FE^BHEDN-)8`TKxL1f`i^ z-vd4=>f2dlI#DvJkCOWIDkC{a(R%QDxU`hCo_bnw97o6bGFGfU2`qU$MQW3s+H_OF zPEx^`Ic?w^d84;ke4;HXB`h=q98jw4&`raGv+e-dN1;|16*zjIoal)iV6zCPMZ^=5|=3aMKUjJv0~qojks=uE0MC6jcIFeu}2jm-VB zc@l9rnxnG9_sP_Kk!icTZ(ry>h}-uz=d4xnR$f1v?1~o-K=Tn9r3gVXSHGDO;2Owo zD2++!{osC7*kE{XuFHmQ;Z1g(lSt&0;zgG|aV#kjifxHnut0T85;g!kTSdTkhQ*n2 z00pIA4d)JEfs&h*fpbE`*ZPw~^^FUiU96o>ygQeEJecQ7-<2cmpQX|BU>}SjWvwrg zsaOx-$omho%a`@UT^-?f?WGVt;3&Zfq;hPztc;_nsI%^X_p-PLT(SrIpzD8*`9 z(M3@Ubs&t;vzLAr`jZz-u0B8APVH)AERhn)d}kUd%?T!c*I6Pm z$tAg;KK5gNnrmH%=}8gIp4xls6t{`M_&SDUNAp3LFz$l+#jfboh020zv-pKw9&)O; 
zeBO=?`c_B*Hkfj*pZmoX%!Se@II%5I%5vuHKEkzCRc0aglJ{S3lS!} ztrllFD-T>rpST?)v;0+&D=oJCg3zewHzHClnhQk3dE41a=?BOscZHNi-{{|E)Gu+b zB7j7?YbBj`#f{*^pY5F!um=PFJb=3WiaBvD^#(2x8>~A;Je}B$YJl~~4iN^O2z$mu zEKla2DtVi%9m=Bl^1afqVYPXu{M(Gns|wGcHnDfE zO@?*K)StT#Woewh%V}llRjsALVLhLbvvDXz;Qp4>@-eTsU*f}kOhIr-5j7c&EsSjZ zV}c&L_uos(!4jf5?5sUay$#2_tBpF|NGGAXF6=-S70}Ps827!wl8Uu>C5rE04gK&9 z=!*sii_2&Oq!X&^GImh}FTDd<5efvK1Ee+raB@#4zOi}5yb=M{L}L4q!}icrz1-no z6Rp~1m)wQ2@Y!mD+rQfcp-j5&8NOE$B$5um?KyTn~#%D;<2lbUq zDL@#lV4!^zRjjCX(XIsKD8?s%4e7}?qL1EUrKn5ulK{OOj8c_ z`!DDxq+X5U%)TCV>BFgfJ>0(OkK*}Zhf6q!!nAZr=M@c9ByK_Z^X*&}VI5 zZkI%3EMAhaDZw?HrUxD4NuevN3+^?mdvYR8u2{}jmp##Z+Zijs-S_L487xuLH>rLt z6fmbHYA|I1_svXBy8k%I1csc^f7m(_LaN#8Bl)aiy?ItS_}%L|G3UM3Z2An5jKgAE zhGZO-mr)7>Tir4JaAC015SuI=dXGH2V3l;1Vwi+5e)?4x6{$mwYxA_tY_Pn?7w;^S z=69FnXH&`pQ|Z=yH}tR0qo>WOmVguu4Ai=uQ9(69)cE0$_H*TcZtsjqKJxa{?mQ2h z_OTjO%eMR(i~WPj;YSzymHD44pXXc=qQXdtPno?}k>f^cJzZo~XeL~mq{nWMujx56 z7*rlG{N}-8OUTi__`3%LKOM?^f7Spad1ljU1xJ&{BZr_3YdFk|{8F#OHwq$P{954& zZPO6`U6ttvG#1oHwrW-JOBZ(EBh9ACQsz<8*mKaC6g;(W0vjwiHMG!#p1)<@pRb>r zx?pPTHH>WvQSpXdj$Pmo>2x{1>!(c@?le#J(kag$x`l_sDl+e(=F{?{k~-v>AMuU4 zU9IgBe&hk0h8A*DqhOrg%beD_(a;FV+L1Fn>5eKJ!gGp_`n)Zj^N>RrKyhisou*5V z2DFGn$yjJR)R#y)VJ{zzF>kaNw$wNFHa_LKr5vzVduf+gahCgqT+4VDt9w(qD==Hw z`OGQ7KJ1G`k{HF7nn-ExO~ohW=k@VToiJ>C zx$YV8*236s@0AcPR+nJ;oNwqCsNSJz|9RcMwRjK*A|=y|15xZ9+JScR_vW#?Jp(^@ z1sc5~T2UQ?8*QV<(PQ(=* z8f*7Iw_s{$m##c&_j5XBR`Oz&Uyju@Maj>wY&Yv>Z7{P73oe*AXew3!5l#!Cdwigu zDWUJB$YE3TYIJ)#Ktm3CWjW~iI(NapweY@j0rYKJ$e}%1`xBNVZe7!Oud}_3C?gMn zJ!TK}p@1&B4=RYKA0{_Uh{FZ-29a|!cJjj|i7xr;pUOLq?+@{;x_B|LQ&`wWdk_Jx zDS5q2WdOu?5X@n?oS72x{f$K2>>3|X8^#epvAbSYlbHB}q<<`FWXQQCQ{>~kjdH2t z0dAMYKI8&mOFh&9KL0Cf1(p?>J%N_o|#$j~fD zbjp#YgD3ADpVVqq+Hw{-|9H#p)>UwRt}yO)umk2)J-~)`?9!au%Yl`CL438`9}VC zN5I&+d1Qv6Kr{L%$!VCB`lcTCX>Mw>LbHg3tw}DlkHk;lz9iCRrN|q<{6A_w4=Y3m zpUUDpHP(pcpGt1HNvn3NY#aTYAK+rum^0SS|BbaS?a5h)3Ga6>B#(iz{Cc!k-E2Qt z`@c)JN~B9M8s~j=N*H%4Jp-{pS?Wm3f)xtxi`d+(?dNpd|6%XV!=e8F{?Umf`%c!W zD9KhKg=Q)t`AA8~(o_->vXe1W3Rwq*PsCIRS;kJt#8{F__T89K_GN~QH?wqJpYQpd zbKm#(`#IM+_ql)9xqjEV?mwnWbItOa_xtsFu8+s#sc0EK#&qF)!N{;Huy?HXaK3Pk z)Tgv`wbcmS@qXjn?Q_uBY_HS9G1)U?Ng@}vWX#zK0O){)V>&Ph_VeE`kM7ud)u%{` z3T^4SQctyz{uxi(3Y)VyUq7K9eeJy?sk@Fe_=$XNiBCUlHv%Gsvlgl9OBi2IIPId( za6d4;J6D=iM7ZV&pfV~v3iULa8J!KkaknPpFsjv>zOL)5{3>RjseKweNy}gzPP?S& z(uR@(G&yVqBd_%C(2={N-7zd7!TRT#aj8MIb#Y&9jkX@bf|Y{(QFX!CimS`rEQuGy z8$EfQeClk*o39YJ3+IX*>;(IKgq1C>$tXV=pd$qj$|BwPBHfz zPoKZ%2H6$6%N1u7_Ql7bbxo50@Z=qpY@6A72|vnkd0li>Y9stYf=M{)N;7imEhw5c z@_?-ds2CCVLcUoIV_OChyN9X^bx}gjcSjX-k8fA^%LJR>>+KGLrFqnR{tat-F&yPL zB73muFx10TgC3YRW08176jRW@e6=am}HOn7C#3 z%#2~30u07UA$lMmVug`+yCbfvK|APo`zJ!MZ8g>-&WESYKiaa3Pr6Sp6UN;xZup^A!NJP*t5e^hB4MY*J`Nv3I2#IwAh~)$N>}o* z`wh-0pz)*xiYT$vX|>Dpv`~Z|%fCM$6lFs5s&3RT|5))xe#9+t#b*i9>^0e^rKP{5 zzIKE;Kqo9o`B1wfff@io4vl~dmS6mnlFo{ zDa*&W7$=2}8n|R&xrkHQM5!i4%!CIskmUfC=qCnt;dddUv`bujr^{p9!ct$FyYG8; zPx@1V)D73aLwd0R|akDl;J3DIN~#&`3Hop*g+Nk|t|m%GXJ@=lE9wZ%2Quizh~ zDd?Qdp6u>`D`7vkz;|Gm(jd}r*ac0`Z6(5NbO(N|4A35!2bzumVqBa9C%UKx--5&P zvi1PpjcvexAm99Zl>GmjzeCw%*7G9d_8O>~ot2uCn%%^%Sr`9?Eq=CQ@4j;S0bu!@ z7x_T7WwlY5~pdLL(Z7zOwAy1~Fk_*qg5 z23hmLIqdVX1$`x%J|EXu`K^@G`k@!B@1@?bJ#zjR8=|9F67ql)J?#S$#FiGie)Dgb zpwr#ARt9#ClCf$(#^C|9jvCocSJ`7Zw|&1Sqdd)C&n3+jQR<4PoKNm!9;p^9cye|Z zS^LiMrLp#L7X@44XTePw0f!k#VRRT%$rE0MZBe)Ik;jZUWD}>zVqabLN}1t@PiXxR zZ)beY3p(m@@XOwP6{p*RQPrk-Z$n%-0usz)ca`?OPy9AGOS-P!N682h%T2#nR&kE+ z<7D&2fz^0^rbsiHW{Drx2mT(X$vGHeNU?q7Qamy)(B+Xv_H!IRwBvS^^ua?fy#)F9 zCy|eA#po>q;{P4&Lu}P%w3m?@o!L^Obf~N{$a0tgxPTeP`7@InaL`TOqm 
z!U;0Tt}gdLVf%Jk3ob|nZ|zxKVy+2r868W=Rl+)<6$$N45(5rgz44ffynk46_KDeV zMx0dUo#Ic;eGvwBnP@EtNvoVnf8@p4!`eq<G;KplLIJ-LpEx}gxq&+`i+R+nupQNBrHA2Y5x{^nSTFK?zEKvf5+-Ajh&2e&} zVxl(s*OqptqrCKo0k@h&>a@?-AC81CHZFU2B&lz1Qf9%uk0kOT6vxoY&`k3)Qej2eNCER|2*fU;9R39af4`A2J)Lox))83@_HJrPPCsG8{bz(av|ky4 zP9udG9NOaJ%{Cl*%5LEB1J`nz9Epb{-eKwW)gigX=IhZc8QU2lB*PvGD@GlO?d_oE zN0w?G^cLUqZ24sHDJe6B?8E6S8MUQA!iZf9Qlj{_Tt_5JfG%-nsPTM_w!(sL!C=U! zF{l;_bc=D{mv=;@Zr^;vGN7xr!yomr4A~V`+QOdxcd6&APaSJhsqf8ug19B`ZRdM4 z^4|2%8~nfWSxW3Qq#N!bN_Z6^fmWuaCW2D!nYf8z9uI2W9h3MMXzI_kSjiRDy*8hP z$X`70*<_k60f*(n>;?N@AMHU|-R-&U^OVq}GEbY&maZtzHjLR27#ndB)i|oA*{tk4 z#%zKJDPZHg#qhFU>1L9LXDRrWQ`UoBR3n4TdE1PvHT?d`O{RKh+RyZ3@CmIgN!M>b zxo1|l$+W*XHB1q-6ck02;z+Z!zX$k;wyw~w8++A_R{c5dHPCgS3G7LYtA^Vz*|OBt zC&1>h!l_mw5UIUhDy{Hci1p7^-LRrRZO!j_s~F3h3y3|x^`zA7?g7isn`;7NE^9g* zzJYuaQ<&3}eFE%-K-T#WVN7<=52(9^bFj(8qFcNpSbCAJvm-{UDwBEHx>;5IJzR#voBd>L&=^kRu{5G#rZVIkPOGG}=hK?O(Qwer>v)peXPP+rM{-p4aW16FX&0l=TSsJ+o(~v$ zjW+wQ<+Kp_u&zEpl#zF771Xq%=?Vy$K_W$|XG5EBLF}&D+q-EQ30b+8v%{ZSMUEMj zo)OG{c!mlxQnzl<7^wyY-O<%nEDaC<-%Ysf8+B@e|+dC6R(HN{9CnIH8U$o1jLIL+0*ch48c ztEYK(pD4g^VLl@(@k<|E!*0?9TBMa^oy}X*{T$VG`^s!CH(e7rPukrm-EQjpvS(@H zyAwo1Yl00#nsTFVl5nhp-dJcO97l6!xk90|53}sj#%r(cW{d~>N%#L+#4Z)_)P%oL zQP+8S!fV9v{U@%bhXh@yn&v}s>%i?Yr!7uvJ$9oy^Lw~tSO`F^Wk$(2XHtw8t%j25DIa&W=td`RA0>-t5gGDB3)VWd@{Nv^?zVaqTe} zqfsRa7^6F8$KA$x`JPQ7OX!tXsAB+7%ZstoG?##2MYIK zut9YR)G2sYAQQUxFdGlPsj4G#Lu7|%D0N@S+#~N_^G_8*Q?$C=*GI^-b^pUo{aZ}M zJ)zfzIeF@_^i|d)2~I;BYq7diy@R;6(`tKFW}U#J814CYg2bqASeA@6gG|9L%Q9s+ z{RrK!_(v=u`l*s?_SNWZ#LN1xb+xq_6wcE9t;fPkr?0DTA1T$L%8&>!v;*yW4sDv6 z7UM!iiY(AF!zT2tYb}Z_p5km$)VP(k%Ox}F%a0`t;JEq#bWzjcv!;dlKyhiwTj!py zLalzLiwniYXW$p!v){k|4eQ>hWwWj5gs)y^93il=BQzH5Dp1Jg+S+3^tB3nq>)E*i zOG%`HTzBM%3GBgDZ72%I*-iKjI~TPM+Aw5s>{@RbL&+_ZoylJP$Ue>)|Na{`yFJF? 
z9G;Tyi-|K_c@zkBZH419z*$r9=FS^g@*!IltL7qAKNu?#rtH_Q=B22~3=R_AZn zQ+Hs{V}a`3c^@RLgbpZXnuq@WS%%Wh|6bmIU%h|Yw*Sxfo>O&0Rg+y9pUX?Lgqh>Q zGN*JBUx2=67_db>AOey44Vz1v#8MooRvZVrujHy#mZ+Uq^luYC}uHLoN0Ty0%cUi~{m=~k$B#(DNw?un~z-cpj4 zO3h~0;V{B8;!WZx`6IE`Y^j?<7}cgMS&zOgt#BaEuji-x<}ajq-)yK#D80$SuNEaHVULZKYZuW+ebY=%F1AZ z?N{TOG#r{m{Qu%8_o;xamYR^4#u)(G(mH!#A{}{{JJ?oTEX@SF zt)h>IY#~t8NP8a55dy%P2z9Y4*1m@3X^trwq)=_4>+^mG;qaSMVkGBfL zQPDa5=D#v)=%3z8KsEr{S*jb*skg??o&CV|AJKL0BH-K9c5rUs|5bj=ZiD&vVqGxo ze`a`6GFqPAH!U^GQjntJM|u}qgDXO$ho;DkaRXWGFWr7@Rq=l2X(w#)+K<3*501V~ z!i{C9ue(ueSyChhFQiu#lG8GIl>U6R6f(AV=3<(a_xh~rN%uQwpD^KRJifGE+N*CV z>Mok3IQW6YFk;ss@2rkBP=3y~;zx*1UeM`8UO@Bj4kXJ#Gv4pcgTQ*y&g|> zHT6g-G~48){d59e_Qcdwr9jTY1NHS<#&xg*^D*uCYGP|BMv$dDxFMw9u6EBy`8&%v z@49uXL;c4c;p?crw)V`T%wXL?krsz_hxi) z-Ci%NzqiXZOt*#>{?b^0{nkSz%EU25zdQ2V*E-#6OEL(`8=@8(*eRYg@ZAqW#wfut z81*yL=2cx-K9t$p1&2F1Z?8Knss|p~b?8l>1HAfMFBTT1`c33@$9qf%k#-3+;>K8b zjItV;bDWMwJ%$8?P;eXtcO<-NKTDbp+%)>iTlZ|eA#c{5QObv`fCTG)*;TJ)zY1g=2X^SHCG+<%oS|EkLek#`I+Ue(is(nUz^ed)jqK8di1wAgWbN=z-e4E94% zL4j4*Ew%N&6x;lOIF-SXs4uIfr$0qo<{qrlcV8Qa46l#8Gesu$xjvS)+hepN=?vGC zKb_$fy0$Wrjpn%_T_l6xI2K~{Kva>Z(9cS9`IZ^k?*4Q~27cROkA5B7R@ zE((*`gW}`#k!jY*5gCRl-2_MKjzjEyk8gYMoNIwh+CG>SSbUdw=OlZ5bfirqsco%B z<@x@UTrMlAC1NFN`5KLz(EtH6AP~$z(-;hF(z) z@Ox9XQ{hIe2JiP@c0W~F2oi@6v=s&RuTn;#Ny_WK)l7@`Lqq3@KEiDw303W*1+~mQ zuy|QCSw%^IHg>ZUNG_wwe;~`NkhQ;K5jDG_;n*u_ZuH##8ovvdeGS zmG)qTLthmLWu>^r2PaP+ zJgITQzEdexzXLbwKV`MlN%LErUhS|FJ40FJzt$m9{o~Whw>uvb_DAV?t1mFj3@xw$$I8;=~lA&wm7cV{lI`V`1L$_!%G2z8+ImM)->z;7sTJGZxIj&A>)sVg z&&uku7TQqrmpPhxwbotQs&aKiqT)-(%D#L2Yy);C769qG7-dk{JC-%1S3F_y+TCA1 zY_Y`gj}JXNwF(qc>f@!qTvFo=;o&7n`ZYLdf8VZ;-73J0;45Th;gV<&5I@ee;+(Vq zIIeW&er(TW8lEGpiG%>g=fY)_9srEFaz5dI+5F>`ap09^tmCMq7(hHRYr#S%Kt*Tz z`Ho+CQtPOHeIEPI?>(*5Gfmi%*bv#KdR`%s*I?zpVT2IkFENy6$QYx8opO+t#k!0J zIuu8rJls|8foID)`Lv1vwNDS|5l#IX6h?t3Dk;*^%1hj@%rhPgq?OzrGVuD^y6eQs zExTtPDZBRdA8EI_gFlT}CCms|514fzp%WzP+xI~YB~81~8sAw*y$uhn5Y_s#KD>C9 z>CthiX#P5gS$4hyHst-n_G)_^;AqV*|AtY$q*teXqN2zmP`<_cC}-sk1N)tA{`csL zF=6G{aG&OT;yLR3Rb#Nt$QAiEIL$_6Y?>-WAsb~I(iri3HpDy(&}9Wm!+SAY^)#^$ zA%GAiSmdz_(*>AIzoP$5?%fIAii|}GA;jjev}b5*`mQX>S3^US_B?;Z;&a=(k_wQK zt}In(j21TC5!D@!;fHc8=+~cDgqp5is83uGWg2^F%I;Mx7Ct4UUh;5*U%eH5jcEZW zCET>HIF6ez^5viPQd!?@9xXdh}5 zn&cN44_0s?d=ZS6+jS9^`MjR558pRERL@O#)8s)PTE5KGchABQx|;!*l^ITRaGScE-?DipjP#=H*Mi;*y7N- zTasDWvCQNkQ@lJMLaSM8E@! 
z>eM`5BV)?4PF*fF{_5qpPv45}j&hy$VOJilZkBtYikE130I^d;ok3s7#FhqG#L&7k zMc_@D?JH#B*=VcEINNH_md$08cJVxpX9^=P<=giaZT(U|$55}Y8uWE;KjjcK zXqBHTTRVXD20e)p3wNkiBv_QW@*H+sgD^uBo7yrc0v#k*4pb}zc{`rFovCMK>f`7C zf~`>A<=Qm*!Z@oBA<+QzDync|4oi^=P~#MR`{e9>)dLs$Ock{5XpkhgL^z&~?ZtZp ze1Dv(y6B;>-Sw_^;qqWwK@`>(4ygz&^i#daDlPW0&N*#Mw=pt{3n8B-D3ck5h2G9z zwteq-sO){@?lGCN_w1*W`TuQt!;1`17I{0SP z#oe`VCThomXYKO2iJ0*K9XSr;H<3?A6EZ(_+0kg`O98-h47+jG$NXGCXUW>oMi(ub z`4F_H#=f7L|MVISdM7ep)H#st;~UufJgJiL; zVtXNQS)@pQkRirL>x8Hi*tnchp*EqRmR5ykC%CjJBHv}N6X|7{&TQpOCExUf+U1mO zu&FaUb?l7Z!tp|6Kv!F#eP%wa$Qf#BCX{`BO%#$vV(lWhTeFq>m*(SQi}n^EG-GUy5UZt6SjLYwI0w9c(pN?ct&F>eiU2}>-x5g0aA7OO zkSx%JLx?g?_n9k01AR@|>OH1MG;;gK)_9a3sPruqJv&o=mJx@4Jh^B2)|YL#8rC38 z1mlCj`?7e&^8pM9^h#ZF?hGADdopR)1VILhv$94wWvCZMZ>@Ox_;ZrR<%QhTso1n6`)|K<{7H%4OB4db*dtxs2e@& zE3!I+;+_awEld^Nt9#{!j;Euw{48g@t5r2uD(WCv*8l3@YCa;8B}l<7qyw0>$^$~oPQhE13HRykkwXLkrblb51<5T?95t5aiXiMjA(;ZV!Fk|{$Lj}U1= z9zLwqPLM#wm@86otyn37Z&u>w(^ypDHt9E-H%g2+l&ke*z4~9@v_<4r z@S69!zMNtl@uNtxk_j{r*ES1)L>uVU<41^EOd0F~pefy!0h#u3^uPuvRwG%#VSoG; z0-%7m08r2IT{6Lx4PK|(vUVFl;E=Oiq8|XW%>}ZwbgRFEgf z{`RT-Au!G{oS!fs_b*87H%tMJt{xDr5R@k3ZSYPW@yv z9~_4b&(%NxR1lUyZ3O^ADDxSH5{+Tm5;>rBaj%c;;D(uzYbdc63qpC z73W!PnDRlKnHJ5rzr1S)N{xz*XJ@tcvUYg~dE37!U9)xax@U-4%=Nr%s;ZK})cxZQ z=O`sCK`qLUJ7J)F+CGR@a&@u9|743qS&))F4tz66E%#Z+;%V0&okzcN-XkX?^*QII zLCaL{YsY<~8WB9%+!-1L@0mMKo;4S*ul`o2^>WrQF1SkK-Nz}Z4&5!j;SKj!n*Ywf zU(o*dV))X3Lp;A{mE&T9>WhDdkNQvlj;m8DW|Al8SMv|IqQ8fac?1Xk?o$6TyZKEU zf`k&YvHTJS>T?=3=V4^qbtiLkic-S10R2Lq(@(an>%8hJ&^RFHCr-w9EeKHfTHfh@ z#gZeoeBS4m)OW>^dTabi^%TA?+7--QG`iVb>SjFSFg3Hu(Xz+(i}K*rzK#=ymwvc& z?FV}LAu*>Zi*<(h3TxZBmq*`Sft7o;#Jj~3Wm+>hQgT*C@O8d!;B7X*5ig4}Q9Bqx zKZrc#n0^g*>FHwJr=-ainaG9g`VS|#=e~ys+#PN<>R%!=}W>2+oN1KZ?JgIy(J08U55Fe+1e}?CP>63|zWd8_i`xQKu9~_()=oPR^F(0 zbzP};>&=UTI*h$HFYza^fqf%70~ps!8Nxu16cbQs9ay820rT*qF@aT6uq^(IBis~- z0SEkEI0#I-;EpgR)o@Boo@^H@HSnYB_3B5E;}D1kbX-5 z-QNXayFmm@^MCOt&ZGQXRoECEY(+S`_Lk?%YV-jC6y)UIPfE_#9V_R(w%l< zudnCxD^8D7_j^q7(9+u3Mun=b2~BTFV2JdAkQg|2va00j%+;Ai4AH#g+I&C5M&Z$S zsED2)Zij2G6TX{|y#iQhFt}3>{fiwFh>_wS`<_0^XLV4NbFOrx-`%Zs-JsD(HkPcA z(g>?BcOuroDM#%gRma!-JxCmF5lS?Cx{PVX+Voz%l`6IKBKNKjjK+iz0z=qpVxR=A zRk1W;Le6hG%cwZJ<|}heJIE;E)4`-I++qh;TsQ$7JjO`8VMTD>bVSAkyp!F>8$nf&B z#~Ht<{hTB)bU)Z#Z&3E~Ls;!Xn0HS2X{mMb&N;L>MJ3i*k*ZvAq2STVcfQ9DCkRzL zMrvXM)1Cx>ENKB#AMK4H-gA1bP1TAHDGIV-F|sn_E7UX9*w}+sm^~@%#bhu^(Sbk* zB7JZayCpa=^}g29V#%c$!sC?kTTGb&j9=GKOG`!$UVay-Hy834a;|9V)1}e_r*-?- zvyGinkZ~+tP#EK^p}hH`Prw_J?(4eTRlJQXITm*R2|K;KT5B}eo!d;k9i<0;OA#z* zIPyTDH@0l*c2L3znZ(0E*Knqf9<3~7M@P1SYIGcim=01P1n6`4QIaThD^pOBxftzZ z_ys=q!Vf2Be1iM#Tn@#p3sM-2D^{w+GIJR+3WsV(<_8Hrt{!mpL&Z|$(tFq2_1cgP z6<`6Xft4U!E^$Z8+<5r2CuhlUu;0ZwnZUtM?<4AwU-d@v(J9k}clPsIq#jietu-5V3o6fW%YS2s zLPNku9RV+wasPhRRD-Ahpe4jBdbt8elGo-f-xoLC5_I0T4-Srm(#-$s7c+d0`70vGl% z;8fsGF$)+9UPOC2H&hs*FkP_Mlv(#O=Rocw?WLF z@&*E_WcmvNLb&N_`*A(2NL<6v43J8CPDCEdoXFtpJSdhl1H0!US| zl`vA)1QXC~&_ZcJuRUKBS7z}iE7#{fRUXs$$rf@InXzU2$3uJeU$kMn*nr@+6j%IK z>s{hozYn`MwCZcqrjBWWy~UQsE!5IY(0F5ml2;7Aet(r(%6!krmCeg}p5{+q%`AW3 zW>H;PV?CLA&7n#is17#=8pN3Q;Q_EAGOY$efHq=6M6UM%fr4ogOTrd=w+uRzYFmDN z9V&C7`UK16Qf|xEtFau*1QIvSek6}ZrpRI|a3ELkCh#%DdMoPJfto{>X6&9Pn~|oc zC~{>`(!{^NM2!1VeCi&zqcNU^n~(437wmR)W;tv1U5qHT30Z~b8LLx#Pl55PMjfPT zF+}4?h6{X@^p>oUrMA?h@JV@rp4HlD71}ZBC46>on37*lP32+XkQ1aw=PVZmB0T73 ztgk?AacU`!AQmjN0~Nx|1&3X{v_P{frrCl-qqODU7OqtY<2aT*WTksM-CLPh=^^{t z|A;-es`9M9jGIe+?K#Zd{@{hr#ptY)h!|t1Hm#9p7VkkSb^@J9--|jOM=WVxirEBT zrerm4?TSG zGjLUbHJd!%BkJCl>vC3q0d`VmVX^ZA>b=doc2}$09SiuR7Y-S`C!<51Y}`U@7pwd# zx4IjS9A3MZ!14wytmDD$IRY1~xcLkDDDNJY9*~?Z?){D}i`wisSoUqPqNCO~)AQbp 
za{OUWe&14dS>ami?X{wHMG*p&!z|t^vY!cyhx7(bAjp^dMp(6*3(y@CU!NV~;FrYr zSIo|I7ju!eXH8h&j?pd5@MORCeAcsn_lCpqmwBFJ3q6`qDasn|1CBMDQ}n|OLPZ#sp;hQN9w)}+iS7? zrdthTH6)LfmR3i;@b%U@uQRqm$y>0Is+{0{RL!1>w;vLMQJS$_I>4dEFb=yKC7m_o z?T{CuIJS^a58qjE=CfnPgVdrqsya(h2@oQzP~db2p9HYPndN4zpYQMgqpT71l5to= zJjzEzxyLO{{h8$1@h1iiA`fN`6hOcZ{cnGQ{!OQ8@yy?QQ`~%uQ`PUx)hVd#RMm_*-A79%;5}f%EC||;~?2@OZ$xz6DG+=$5qsV*jtQ@ zh^fuD%z_di+^-xG5JZ|x>vYp>ChSw;g5_(Eck5lBNY>ps68N;Xm@5!>bh&FgC}9J1 z`hon8gmV>eyb6iT!y`Zo*nVy_`x+Og?tXu4%M-L1`{rTb-+_!~@vVS4^rLh`So-6U+yJ0hq5Q;nj(R99a#`P%oxk`Ww0Gwa#Y@ zAmK(QuuuzF;uH*0GA|1N?hyK^;>bIT592TP*6F-*t&QP7`%0MY(8B&EBbKuSh7#yC zpIL&_gJ2{?d%$*{$z=~;p)KPdOKsc5)O82`AG4$Q>PBF_~pA#QF*o z*k|^BAvpvleLM)&EYwMVyl<=34#a)><|Q0C03>VYkoRDm4BqZW6_mQVaWH+V4SeOov7Cz-VjQ+YGQQ{wTAx|V+HbGe zyOCb`=#6p)&)5^ooj=(eb=t)iXgKjZ6QBfXxZbk<;q*OV%Td96p-Fmkn*%Z@u3=Wo=G{GHxIDv!D(lO7 zt*<3LZrqk#Yx*8GPL!Q`R^W}H_%1PoamP}4DlqJGSs!Dr5w21lOHEsnJnIrazO$m6 zSOLRs!WDuvOTc@}jX{*l^I^V=N5&%dJh>tGeQ2A6!jH*OEB&JaAJb9bW9Zl^9wTV(c1K{c#F#H}3p?4TmA&|&i?_@=&VeNktP%}SW@-lu86lK? z$9FHf`er2K>`8b3TYG072xY%FeBDT7N!holtcC2m3@s$pv?yg4LSjyw5q^_vt>*d!P3`?|I(y+<#c+=W@+8zwNvH zzTeL`lKk2VMBsoG8rsV)2wYNHAzQc|_Z~EI-W}(}7OT68we7`>Ct*ltOWe=71SvNm z7tc3BQGU?56QidcgU4Jxk8CxSN*s9YbBGQEk|T^WFpa^B4b9-}W3>_K3RYF<`k=xrF09mEUxyYWGz;E7P64kqKSoo=SiH; zMXi{BObYzkSEu)UWIxPU1%IC>OM|MEL)!=05Qfh=WTB8|rFR}y$Bw=ix?jrV!ja{| z#M4xRs)J`WsBp#9xKK;w7)e_oZ#IzmM`f9pXNF*hpa&Bws_L2K10>B-nSN`ocJG?0 zon8;R39Symg~3Y-KNEu?@X<5Ty6!uVQ}-Uz-=%Lwx_-*HnUkx+wVXS8ErSUl^-cF3=DM7Z;dkrL(=C+XPXxWi{YV;XHR zo@v8zN==zBd|GuTk8U-tuj^AdCi^l(K1@jbUV9#T9he1oo`LlNtg+yRt7X!FQEP8w zqPo+3&H&z5)Gxy>dao?S{g!ATs>^8%Jp?L`plbzyGKp?u@b zj+BdNSbRmX{YYm4Ip)Co*@l61(rnBk)RjWutorcK;hl4qU+A+d%4p20Pc*++0B+y< zYSbGr?eb_QVy^43GGusn(#53k7FF@CxTLS9VHU$*HpzB9U3^<;D;EP;z?Ba*3bl*% zO~^~*l(6?%Z#5qg3Jcp~!bw@_j>hNuq|GO@o_gRMKrI0i+nvC7jG*3d6*<(cr80}- zE}v~Voxs!aK^+m_!GSPT{Y<9wfREqpWJfYXLJ9%}{?gM?VCQ`z^7+Yw^MWTyGHC^O znM(|7<4p~+c-kux!5p!tmui^Jz*>H0s)}nf^>7%Y)(R^sIO&DMboa2a@YIDz--#Yv z2}LIVJ8Y)IXvh8DQhihmcV|cD}Fb>A^5soMo{VO|*cjc(vzU zhBDMh#@Zwh?j)FviGFqt4&p!F%I9{c-0@5ItX&TmAgiBq2iwIorcuq(RGKWE5>wIC z$!{<+>2ZC--epffZ3m# zV;W^vK_EGxfpB>4&V&-Iv`(d;9-PhN+d@XifX_Nb52`zOV=z^=5y@(GeE!8t*V~Vu zHMVbJ9c)W;(Qoa_WP(Zo8f3m%0DauoYeVou4r5Kx|a=%_>@ zVMO(#ydtyImM3}zc!75DK-};wC%`Ff(5cqq zgByr|B>T^pPfO!VbUmxVrPwq;I-kdEe$DJS%?d2ezvSKjYv}2zaJ)Pvn_ltF5y<+S zgbk-{Ck0PihntWr?%97(Hzz9yL=+Ec^zWWSR&$>A_2+kt=zp^!VTkx zs{jqiDnCHGU*C~jH4$?^?IcOykw26BrJTFY)l{2VR>{hsrNh_bwUG)hPG(Jh;lV4l zGdQ7h;0+fbwFyLoUwUOo{#BrpdeaZQUq2o1QJhz&bw)eF`Tlcc1GTz)ze-1mKP3*z z8IKzR_zqK23V^pYJ|DKCsX%v0i(s+&nMBCO!(S?R?{kTy$Y@<$&arx!gHJ%>FPxZ? 
znY5iQyNhsoo%yLgWni$6A$l(4QG43x{FkY~aDYnY;3rG0Q$jw#oj{I!C z);+D8)7P)a@@ryIyURPWkuRqgE5N#y9eL9eEGnfK&_zPDi137hdeT6UOh@XU^WU15 zKRd=>RIkR3(8RvLa!y&AtuD(urX(Sdq>dM?fp1H?^L3QfQrIA_yV5yDy_ z`r?Jqu4dwdn@0|hkB`{x7lE-XQsSk7g>MrA?9089SUfvw)c4JOS-g4pd`ZUgaLF?_ z#u4*piaQ6o5wGZ<>4`W`q9`Q?asx^PUKPPp6^h<9lcFVggltfJ^9z>?;w9Vreu0q* zL>WFfS5iN;QkMe)2f>XN7`G80g7h@;4v9*+`q7)`&IN&&bC}^2KVn4&&O6U2QJPpE z?ck#FJV({IudpMOw~W>OJ&kR0EE-ekL)%#}DY&3AAuglDLkoaz5~}OOC%CvZtWXkO zTjzB=Xm!{`jO){8d-m!MrahJ$RTNpErnoG{VoG6mmw=n~aGL>l6_An0TvSN2ZbntM zTeFG3_Jx6ofnx`XS>hW)uf_So_nbbp&~LK+xc+j|Z8s~6ymCLE8OlPGw{te4AZ6{z zW-IkPrtx&W0Qy}fH%R?)sO(vy*$^<%BE^-NZUzY=?yvR$ochD5*a7V+|8TuSzQ@fM z4_=pla}7QY+^6MALBQI_Fa;>ni|V)7256GvTII<(sGD|-&s85~L=p?tPmki8Ip~i& zw=%68a@0gSsGcxsvd~CQ zY{}R`&t|Zo>cPZzS4n9`%ag>@pvVD#b`4Rlb|qai=S}sLY;nm%R;`J zi+!0|=kq8L{u{Y3Sd-fAo|aF%GdznDmuHJS8?Hd`x5X1N?<$PnlW)T_ctUJ7$Z@ot zf|H2WrMmz+ST6{@ZMEa9-vd>yw=smtS|R0C1KCv`$K@Ekj=p@?B-l(ZOAE(!zZ$YM{9SJjg#Y5$}-w85|zu$!0CvpFkS-6N$`h$fen`+~5?}Ios{B_JGw)x3a0u zU0A2=?y1LI!8lj`N2kmuXH6~ik?J-%Zt98X<0HD)N{%Y za~|+_e#;0}eNNlC3iq$mcs5%j+Z+hLG~l_nysWJ6a;ooFFR?R6#f%<3O51YY@Qp0f z7F#Q-0^?)mPZ$rSkjhLL<_gid&WIp>Xm0UC-)YS3n7PU`g1SDovK(K(8)_U ziF3WAnX^0W1br5iKElxi*PHD)Zz(a+B6;e|a~zP&TLoK`AYE9?)-S~dFmyMb<;cQaq_^S~U-KOX9zAM%8rROO z7UD?Yl%cUFwJqnX9fYOa?)6Ak*y1cu>s@2$A5okr=ELQ*|xx z4x{YnbD}-lhdUQjUpg+)&bNg+e-yptB$$HFfr6anZl;II1W>#C31`YNS5S^8OOKv# zTwJ;jR3lgA%?oQdV8T<9`txFF9#mw&)w+xKLj|-fJ>o~b5->ee9g{&O67^ZIO0qBfqnV;IVndUt!l~>#Z`EZ?@tz(k$YqB3Pd}~>+4)gPK7t{upAXpab`PqXTxA2w}%D22mJXRxQUGZfv*S6fJ*DVhu0T3 zldTY?I%aeMU@8LZ820*OLZfvnpCG)U7;n?4B{<{0N* z;vRgEqwCO)h6XXfc3&0+$%-M5K^I2)U71;^SG1WsE0uA^49@9ZqN?5DSnynx+;~A} zQp`dEK0ZAL$#CbVsG{}+_)eHtVHCMj2#SY{)W&x_do9_hBg9jQ3oOU{gn~8q7U^z{ zalCwQ^m6g*IxnWtto_jvx`P(L;)T(Uc#}Xkp4D{;#kgSc%q^Vp?AEiRtUlY+s#p$o zYKh&fLP}-;S6KKO?eKue2PGCp3$EOTT(j|Du}*?=ePQ^O@`Lom_&zD_hYRwXV&J$k z(*^WH6MD?Dvk)anVS}FBI3O%AOg**PZ1(=xxvvpgLSEgYLgOw>Q?^J*u8Jm*(<0y_ z2v83}YugUd;(umxLxBp2cdCMT`0Woob6|Z1j6KlWMULLg3oN zucgyZ^L3M%Y~x42gR$Iv9PKAkogZKc`Y>MK42gu%_VWbSizQq(d%b>dO*quZh;X-V zXW##wvKi;B{Q-AWbmyVs%6b#(j+4 zt+y?g*LhrMxn;Hc5o+XMsX5`>9Kkd?uPMnv;gq@hs*wX5VQJ&(dv^=Ueu;Z`X`3iY zwCn>e5FnTuPns~;HK=Il&Mhe>TqHW}xHUHZL8j$ddA*i!Auci)&z zHjC-WFg7N;v|EZz4vDWyr`g zgp@5dHVQBKCz)rCBQ{KQSIP?{p4-BkbD~I?B0f3Y_8(+S%38m{7stHM4dlyA{+sXeKJOu z=pIR4qTSiWsBNXq7~cLqxA8wuV}9>XG!y9IzsK#Taf6Qidx1rKpxpLfgUGm%AeQ(3 z`$r5~<|fyz2VHX@h*G!@K9&g9T}eXP46r(WSbCIsV;%H1omV1}s_@x#xg7?`{+^D1 zBK!M0k>n|t>u{8LdSv}B@VJKXD_!mscL*mv@*p|4XXMgV%Uc;IDUY67DktCm4WT_U(S3s#)@-61lg5^h2JFLf+V*01_(O(?@s-yDAFmfq93B|cH%1M zU8AOCS=VuUFB_Q@QW_yFf($cPR^E1nEKeuH!U_bl zn8g&5rs@JpIRZ$%JPFt|+nP;&-EjDgibv%X4z|o1onL;8?p6+Ikx~#)jyJg$K^jP7 zi$*T=A!-J4Dv#7>45&SbG!5dOc_n&ySG1R{;$-(HC5jZ3L*C91Xm5!Nt=~BqaLj4N z?)2*q9$!;0F*UgP$Vl(r5~r*wzXFPi39B)^TQ-5k6Co^?l3frysC--D#E3 zfzFY-C%oHYe8_@^F9xMaM4DfI^7;#fw)YfRwZ2~Xo4pi=4r~YRm&5U^(J7$v@YCJ` zb7l|ycBUiceIyNt@-Xlf5(!W(U(iArf{^Rqw76mHFY96Fr{LJbrBoNYSXkNT_%0wQ zxM+4eULa-1abfY4mD1RP8-@nqEMk5Z>KG6jr=5T13!JDz>fHTV5W)H-$u9`?jo!bA zgJc1{`9?EkZ)TevKSK%{piUID0(gIl@(2psrklimb#U*{Z6go&Yj;~KisYM)h}Xa3 zD4cb^bl&r4HH?kD`sCiTs=`A^9u9r#D{&i)G;%R)5(Ws=1PxQNL+C0-tA?D zdyz>*PLAEnEK9Ogr9aVUo8SqrvA7$cRxs8*0pDd~)h+RCN0p*Rr;u~9rvO~#U69JC zZialGgUdqTOFTuD^33c$T@kAaRg=>Qhz0~Jt_2Z|U&{6uYOE>Hasirg0WmP%XO0vX?7xvUU=o$9R??($6yCNB|t4_4AnQG6qDyh z=)Lti>yw*ni|JF_&M_&04)Y*{f-#?$wF%LT>UW8@4NA#nbi=mka~0AqL3u?N>z~#O z_c#S_E$i#se*o+0X{qP$YHRcou5#yQQ|yB-F%1(ZAdnL1TnCG;p@VwCoPLv8nH)tu zH3sE0*zO_4R*+WON4F*j0oVxV{Jj{Br`2bB%=lYaiHm#UjfSO88MPN^01zu~yaJpu z3D*mnlJzKd5bq@R-oWO?S01jPqO7buw5{|cW$`ZFW)V$yZ?bF|IrGv_%=)@_n%5cp zNO8%i?HyuPf(wxjN|P@DzL)UK&74UM9{aL 
zGxP(fi(@=1EI<-{#C_RUzvXA9m*?{^vtt*rQye(#7C`*W@V1HWlm)Z0oev zX8K^WsLqqfo)Cq2BjonZtex97_{&yl@)Vr;hw`qQCtme>7orvy7AIU?f}MH9;$PuA zJ5l>hSo|zH$I1Xvk}8Mpz=xGRL%(NmSCIPLdy53!m^;Y6`&((RCgUGT9_mVX$|uIT z%0F^=m(aOL|2FHlRVVc;mD4qcE4wak4338rEvS;9C}x=nG?edNb2dPyhqkq{ik&5f z@4AVj3&l+#S#wymqbO`-Z~McpyVeQv8>>+3IIuT$!AMyfh`2K3A}_A#ti6_D4B%%x zmI<}}`N^LhnYYa~87*vJXTE;x5=CIWO!hN?52C5N4l>C~KQ>yDO}=-0=r!K)TJcpO z!}qVoA3`+uIe-)?8w7AFHNtYhKB$jCwZSw;cp-1q1dQK1QW>tC{@h{_R-?_vpOlA_+b;RH#_63yDN2I_#$mLJw2|y8?i zTh1YY^n*ROd#XCPhxb*rC1&zXX=FQ2sA6sZOIbDU{!tanqi_O(fb z3)D6@7%afP26`|FJnvjXsN2heRb}N?{&F(O6QAFRKNVZ_4S3mb-3p8R=4}buRt8KK zrPYVdqVW|t-hC2ni^u6Q^v4*-xS;~d@xBT!I0a2gF9?q{yp1 zk%^UaOm5uX!k>f~hd058-V#>rIO;iEZ-YwitGiBKPrfj;+>*joZ#p84%=@I4?um1j{Qz-3})_o+89O2u0bSk$gA^vA&@ znkWP#Wl{o2Zje98@o*)H66v{!?aF()7hZ0A?8>7J*_7J~?jH*|Po6(pG^dz$Aj#K0 z=ac-8;|c#W&~TE$E>4zZ;AqP5gEtyD>UgNF@bepYe=EpP%(2l@8!(+jF?4~7{=u2{ z?z`XZcE44(#rZdg&s$r6qXPMVo&5AyTlN>%zQO{>VFxdKzRvD_t>rJw@*U#E-#seW zR}KG>FIW(qbYqN`vvm@L+pu9~@uRqaTVoq+R)8_4&Nzf)#{J#;drUUp3+J?tAmvXj zAA^%+gxE;`5`2`u`Ay&k{Tk``fAadDhJ*aaVOrQ}o^KIiC`{u?oI`hvtsWUk+lgzH zU>`IPp&c8LA@dIg8|*%MyD-VH5QjWC!s+Kv78g}Xz{sFwttSaPvtGIlf4kX9)iWO0!?e3 z_?d})wkAJuyvxKK9N%h~xyinKk>F&OcCR?tx0~-sxObEva{o4)w~w7H$Y;RN#)1(& ziyp?j3~#dIMfot;<8tf*j399`TdU%sSEuZFf&9_;f3Xf$)p=#q6Bm3)1EBcEin#ew_ zyPK6x#5GmJAqxXekPR>WGgEyU&F$ViV-Os99Ne|S7g#dL@RR*24|37&0zm7=vK@66 z*B1B7>p6el^{&iVstuO5igc;J7{`Vk2_u`y1x` z;;oqec=SJd{oi21cxC9lstV-)NjGz>wI|Lp@fVXE0a-|Vzq}U={#f2W+{ymfnf{ju z_+wN3-xGcRttL!$q4lu1Tda=JU=XQr`}42`@z)ZykD>A~S! zZyor8_&a2u0EK?mH9PV!{N1h}vd$Se%-@31vv_{~e?|-aVRg{&tiS&$fB(IR`l0^- DLO^x_ literal 0 HcmV?d00001 diff --git a/docs/source/automatic_prefix_caching/details.md b/docs/source/automatic_prefix_caching/details.md index 2d3214e28ed93..17f806217aa65 100644 --- a/docs/source/automatic_prefix_caching/details.md +++ b/docs/source/automatic_prefix_caching/details.md @@ -25,7 +25,7 @@ With this mapping, we can add another indirection in vLLM’s KV cache managemen This design achieves automatic prefix caching without the need of maintaining a tree structure among the KV blocks. More specifically, all of the blocks are independent of each other and can be allocated and freed by itself, which enables us to manages the KV cache as ordinary caches in operating system. -# Generalized Caching Policy +## Generalized Caching Policy Keeping all the KV blocks in a hash table enables vLLM to cache KV blocks from earlier requests to save memory and accelerate the computation of future requests. For example, if a new request shares the system prompt with the previous request, the KV cache of the shared prompt can directly be used for the new request without recomputation. However, the total KV cache space is limited and we have to decide which KV blocks to keep or evict when the cache is full. diff --git a/docs/source/conf.py b/docs/source/conf.py index 96ad9a4c26b09..e9d9ac68c9560 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -10,11 +10,13 @@ # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
+import inspect
 import logging
 import os
 import sys
 from typing import List
 
+import requests
 from sphinx.ext import autodoc
 
 logger = logging.getLogger(__name__)
@@ -34,6 +36,7 @@ extensions = [
     "sphinx.ext.napoleon",
     "sphinx.ext.viewcode",
+    "sphinx.ext.linkcode",
     "sphinx.ext.intersphinx",
     "sphinx_copybutton",
     "sphinx.ext.autodoc",
@@ -94,6 +97,69 @@ def setup(app):
     generate_examples()
 
 
+_cached_base: str = ""
+_cached_branch: str = ""
+
+
+def get_repo_base_and_branch(pr_number):
+    global _cached_base, _cached_branch
+    if _cached_base and _cached_branch:
+        return _cached_base, _cached_branch
+
+    url = f"https://api.github.com/repos/vllm-project/vllm/pulls/{pr_number}"
+    response = requests.get(url)
+    if response.status_code == 200:
+        data = response.json()
+        _cached_base = data['head']['repo']['full_name']
+        _cached_branch = data['head']['ref']
+        return _cached_base, _cached_branch
+    else:
+        logger.error("Failed to fetch PR details: %s", response)
+        return None, None
+
+
+def linkcode_resolve(domain, info):
+    if domain != 'py':
+        return None
+    if not info['module']:
+        return None
+    filename = info['module'].replace('.', '/')
+    module = info['module']
+
+    # try to determine the correct file and line number to link to
+    obj = sys.modules[module]
+
+    # get as specific as we can
+    lineno: int = 0
+    filename: str = ""
+    try:
+        for part in info['fullname'].split('.'):
+            obj = getattr(obj, part)
+
+            if not (inspect.isclass(obj) or inspect.isfunction(obj)
+                    or inspect.ismethod(obj)):
+                obj = obj.__class__  # Get the class of the instance
+
+            lineno = inspect.getsourcelines(obj)[1]
+            filename = (inspect.getsourcefile(obj)
+                        or f"{filename}.py").split("vllm/", 1)[1]
+    except Exception:
+        # For some things, like a class member, this won't work, so
+        # we'll use the line number of the parent (the class)
+        pass
+
+    if filename.startswith("checkouts/"):
+        # a PR build on readthedocs
+        pr_number = filename.split("/")[1]
+        filename = filename.split("/", 2)[2]
+        base, branch = get_repo_base_and_branch(pr_number)
+        if base and branch:
+            return f"https://github.com/{base}/blob/{branch}/{filename}#L{lineno}"
+
+    # Otherwise, link to the source file on the main branch
+    return f"https://github.com/vllm-project/vllm/blob/main/{filename}#L{lineno}"
+
+
 # Mock out external dependencies here, otherwise the autodoc pages may be blank.
 autodoc_mock_imports = [
     "compressed_tensors",
@@ -112,6 +178,7 @@ def setup(app):
     "tensorizer",
     "pynvml",
     "outlines",
+    "xgrammar",
     "librosa",
     "soundfile",
     "gguf",
diff --git a/docs/source/design/arch_overview.rst b/docs/source/design/arch_overview.rst
index a9e7b4bd69bc7..bc3f509f0a66e 100644
--- a/docs/source/design/arch_overview.rst
+++ b/docs/source/design/arch_overview.rst
@@ -42,7 +42,7 @@ Here is a sample of `LLM` class usage:
     sampling_params = SamplingParams(temperature=0.8, top_p=0.95)
 
     # Initialize the LLM engine with the OPT-125M model
-    llm = LLM(model="Qwen/Qwen2.5-1.5B-Instruct")
+    llm = LLM(model="facebook/opt-125m")
 
     # Generate outputs for the input prompts
     outputs = llm.generate(prompts, sampling_params)
diff --git a/docs/source/design/multimodal/multimodal_index.rst b/docs/source/design/multimodal/multimodal_index.rst
index 30f543abc20c7..c6d47f90b62d5 100644
--- a/docs/source/design/multimodal/multimodal_index.rst
+++ b/docs/source/design/multimodal/multimodal_index.rst
@@ -7,7 +7,7 @@ Multi-Modality
 
 vLLM provides experimental support for multi-modal models through the :mod:`vllm.multimodal` package.
-Multi-modal inputs can be passed alongside text and token prompts to :ref:`supported models ` +Multi-modal inputs can be passed alongside text and token prompts to :ref:`supported models ` via the ``multi_modal_data`` field in :class:`vllm.inputs.PromptType`. Currently, vLLM only has built-in support for image data. You can extend vLLM to process additional modalities @@ -15,9 +15,6 @@ by following :ref:`this guide `. Looking to add your own multi-modal model? Please follow the instructions listed :ref:`here `. -.. - TODO: Add usage of --limit-mm-per-prompt when multi-image input is officially supported - Guides ++++++ diff --git a/docs/source/design/multiprocessing.md b/docs/source/design/multiprocessing.md new file mode 100644 index 0000000000000..b58456ecc6da8 --- /dev/null +++ b/docs/source/design/multiprocessing.md @@ -0,0 +1,195 @@ +# Python Multiprocessing + +## Debugging + +Please see the [Debugging +Tips](https://docs.vllm.ai/en/latest/getting_started/debugging.html#python-multiprocessing) +page for information on known issues and how to solve them. + +## Introduction + +*Note that source code references are to the state of the code at the time of writing in December, 2024.* + +The use of Python multiprocessing in vLLM is complicated by: + +- The use of vLLM as a library and the inability to control the code using vLLM +- Varying levels of incompatibilities between multiprocessing methods and vLLM + dependencies + +This document describes how vLLM deals with these challenges. + +## Multiprocessing Methods + +[Python multiprocessing methods](https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods) include: + +- `spawn` - spawn a new Python process. This will be the default as of Python + 3.14. + +- `fork` - Use `os.fork()` to fork the Python interpreter. This is the default + in Python versions prior to 3.14. + +- `forkserver` - Spawn a server process that will fork a new process on request. + +### Tradeoffs + +`fork` is the fastest method, but is incompatible with dependencies that use +threads. + +`spawn` is more compatible with dependencies, but can be problematic when vLLM +is used as a library. If the consuming code does not use a `__main__` guard (`if +__name__ == "__main__":`), the code will be inadvertently re-executed when vLLM +spawns a new process. This can lead to infinite recursion, among other problems. + +`forkserver` will spawn a new server process that will fork new processes on +demand. This unfortunately has the same problem as `spawn` when vLLM is used as +a library. The server process is created as a spawned new process, which will +re-execute code not protected by a `__main__` guard. + +For both `spawn` and `forkserver`, the process must not depend on inheriting any +global state as would be the case with `fork`. + +## Compatibility with Dependencies + +Multiple vLLM dependencies indicate either a preference or requirement for using +`spawn`: + +- +- +- + +It is perhaps more accurate to say that there are known problems with using +`fork` after initializing these dependencies. + +## Current State (v0) + +The environment variable `VLLM_WORKER_MULTIPROC_METHOD` can be used to control which method is used by vLLM. The current default is `fork`. + +- + +When we know we own the process because the `vllm` command was used, we use +`spawn` because it's the most widely compatible. + +- + +The `multiproc_xpu_executor` forces the use of `spawn`. 
+
+- 
+
+There are other miscellaneous places hard-coding the use of `spawn`:
+
+- 
+- 
+
+Related PRs:
+
+- 
+
+## Prior State in v1
+
+There was an environment variable to control whether multiprocessing is used in
+the v1 engine core, `VLLM_ENABLE_V1_MULTIPROCESSING`. This defaulted to off.
+
+- 
+
+When it was enabled, the v1 `LLMEngine` would create a new process to run the
+engine core.
+
+- 
+- 
+- https://github.com/vllm-project/vllm/blob/d05f88679bedd73939251a17c3d785a354b2946c/vllm/v1/engine/core_client.py#L44-L45
+
+It was off by default for all the reasons mentioned above - compatibility with
+dependencies and code using vLLM as a library.
+
+### Changes Made in v1
+
+There is not an easy solution with Python's `multiprocessing` that will work
+everywhere. As a first step, we can get v1 into a state where it makes a "best
+effort" choice of multiprocessing method to maximize compatibility.
+
+- Default to `fork`.
+- Use `spawn` when we know we control the main process (`vllm` was executed).
+- If we detect `cuda` was previously initialized, force `spawn` and emit a
+  warning. We know `fork` will break, so this is the best we can do.
+
+The case that is known to still break in this scenario is code using vLLM as a
+library that initializes `cuda` before calling vLLM. The warning we emit should
+instruct users to either add a `__main__` guard or to disable multiprocessing.
+
+If that known-failure case occurs, the user will see two messages that explain
+what is happening. First, a log message from vLLM:
+
+```
+    WARNING 12-11 14:50:37 multiproc_worker_utils.py:281] CUDA was previously
+    initialized. We must use the `spawn` multiprocessing start method. Setting
+    VLLM_WORKER_MULTIPROC_METHOD to 'spawn'. See
+    https://docs.vllm.ai/en/latest/getting_started/debugging.html#python-multiprocessing
+    for more information.
+```
+
+Second, Python itself will raise an exception with a nice explanation:
+
+```
+RuntimeError:
+        An attempt has been made to start a new process before the
+        current process has finished its bootstrapping phase.
+
+        This probably means that you are not using fork to start your
+        child processes and you have forgotten to use the proper idiom
+        in the main module:
+
+            if __name__ == '__main__':
+                freeze_support()
+                ...
+
+        The "freeze_support()" line can be omitted if the program
+        is not going to be frozen to produce an executable.
+
+        To fix this issue, refer to the "Safe importing of main module"
+        section in https://docs.python.org/3/library/multiprocessing.html
+```
+
+## Alternatives Considered
+
+### Detect if a `__main__` guard is present
+
+It has been suggested that we could behave better if we could detect whether
+code using vLLM as a library has a `__main__` guard in place. This [post on
+stackoverflow](https://stackoverflow.com/questions/77220442/multiprocessing-pool-in-a-python-class-without-name-main-guard)
+was from a library author facing the same question.
+
+It is possible to detect whether we are in the original `__main__` process or
+in a subsequently spawned process. However, it does not appear to be
+straightforward to detect whether a `__main__` guard is present in the code.
+
+This option has been discarded as impractical.
+
+### Use `forkserver`
+
+At first it appears that `forkserver` is a nice solution to the problem.
+However, the way it works presents the same challenges that `spawn` does when
+vLLM is used as a library.
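+
+To make the shared failure mode of `spawn` and `forkserver` concrete, here is a
+minimal, self-contained sketch (an illustration only, not vLLM code). With the
+`__main__` guard in place it runs cleanly; removing the guard reproduces the
+`RuntimeError` quoted above:
+
+```python
+import multiprocessing
+
+# Module-level statements like this one run again in every spawned child,
+# because spawned processes re-import the main module.
+print("module imported")
+
+
+def work() -> str:
+    return "done"
+
+
+if __name__ == "__main__":  # removing this guard triggers the RuntimeError
+    ctx = multiprocessing.get_context("spawn")
+    with ctx.Pool(1) as pool:
+        print(pool.apply(work))
+```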
+ +### Force `spawn` all the time + +One way to clean this up is to just force the use of `spawn` all the time and +document that the use of a `__main__` guard is required when using vLLM as a +library. This would unfortunately break existing code and make vLLM harder to +use, violating the desire to make the `LLM` class as easy as possible to use. + +Instead of pushing this on our users, we will retain the complexity to do our +best to make things work. + +## Future Work + +We may want to consider a different worker management approach in the future +that works around these challenges. + +1. We could implement something `forkserver`-like, but have the process manager + be something we initially launch by running our own subprocess and a custom + entrypoint for worker management (launch a `vllm-manager` process). + +2. We can explore other libraries that may better suit our needs. Examples to + consider: + +- diff --git a/docs/source/getting_started/debugging.rst b/docs/source/getting_started/debugging.rst index 0c1afcbd7c0b9..d6c83014dc69f 100644 --- a/docs/source/getting_started/debugging.rst +++ b/docs/source/getting_started/debugging.rst @@ -136,6 +136,62 @@ If the test script hangs or crashes, usually it means the hardware/drivers are b Adjust ``--nproc-per-node``, ``--nnodes``, and ``--node-rank`` according to your setup, being sure to execute different commands (with different ``--node-rank``) on different nodes. +Python multiprocessing +---------------------- + +`RuntimeError` Exception +^^^^^^^^^^^^^^^^^^^^^^^^ + +If you have seen a warning in your logs like this: + +.. code-block:: console + + WARNING 12-11 14:50:37 multiproc_worker_utils.py:281] CUDA was previously + initialized. We must use the `spawn` multiprocessing start method. Setting + VLLM_WORKER_MULTIPROC_METHOD to 'spawn'. See + https://docs.vllm.ai/en/latest/getting_started/debugging.html#python-multiprocessing + for more information. + +or an error from Python that looks like this: + +.. code-block:: console + + RuntimeError: + An attempt has been made to start a new process before the + current process has finished its bootstrapping phase. + + This probably means that you are not using fork to start your + child processes and you have forgotten to use the proper idiom + in the main module: + + if __name__ == '__main__': + freeze_support() + ... + + The "freeze_support()" line can be omitted if the program + is not going to be frozen to produce an executable. + + To fix this issue, refer to the "Safe importing of main module" + section in https://docs.python.org/3/library/multiprocessing.html + +then you must update your Python code to guard usage of ``vllm`` behind a ``if +__name__ == '__main__':`` block. For example, instead of this: + +.. code-block:: python + + import vllm + + llm = vllm.LLM(...) + +try this instead: + +.. code-block:: python + + if __name__ == '__main__': + import vllm + + llm = vllm.LLM(...) + Known Issues ---------------------------------------- - In ``v0.5.2``, ``v0.5.3``, and ``v0.5.3.post1``, there is a bug caused by `zmq `_ , which can occasionally cause vLLM to hang depending on the machine configuration. The solution is to upgrade to the latest version of ``vllm`` to include the `fix `_. 
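
As a supplement to the multiprocessing guidance above, here is a slightly fuller
sketch of the guarded pattern, using a ``main()`` function so that importing the
module stays side-effect free for spawned workers. The model name is only an
example:

.. code-block:: python

    def main():
        import vllm

        # Any heavyweight setup belongs here, not at module level, so that
        # spawned child processes can re-import this module cheaply.
        llm = vllm.LLM(model="facebook/opt-125m")
        print(llm.generate("Hello, my name is"))


    if __name__ == '__main__':
        main()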
diff --git a/docs/source/getting_started/gaudi-installation.rst b/docs/source/getting_started/gaudi-installation.rst index 68c1a56660fa4..249e08278ff8f 100644 --- a/docs/source/getting_started/gaudi-installation.rst +++ b/docs/source/getting_started/gaudi-installation.rst @@ -4,7 +4,7 @@ Installation with Intel® Gaudi® AI Accelerators This README provides instructions on running vLLM with Intel Gaudi devices. Requirements and Installation -============================= +----------------------------- Please follow the instructions provided in the `Gaudi Installation Guide `__ @@ -13,7 +13,7 @@ please follow the methods outlined in the `Optimizing Training Platform Guide `__. Requirements ------------- +~~~~~~~~~~~~ - OS: Ubuntu 22.04 LTS - Python: 3.10 @@ -22,7 +22,7 @@ Requirements Quick start using Dockerfile ----------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. code:: console $ docker build -f Dockerfile.hpu -t vllm-hpu-env . @@ -34,10 +34,10 @@ Quick start using Dockerfile Build from source ------------------ +~~~~~~~~~~~~~~~~~ Environment verification -~~~~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^^^^ To verify that the Intel Gaudi software was correctly installed, run: @@ -53,7 +53,7 @@ Verification `__ @@ -107,7 +107,7 @@ Supported Features - Attention with Linear Biases (ALiBi) Unsupported Features -==================== +-------------------- - Beam search - LoRA adapters @@ -115,7 +115,7 @@ Unsupported Features - Prefill chunking (mixed-batch inferencing) Supported Configurations -======================== +------------------------ The following configurations have been validated to be function with Gaudi2 devices. Configurations that are not listed may or may not work. @@ -152,10 +152,10 @@ Gaudi2 devices. Configurations that are not listed may or may not work. with tensor parallelism on 8x HPU, BF16 datatype with random or greedy sampling Performance Tuning -================== +------------------ Execution modes ---------------- +~~~~~~~~~~~~~~~ Currently in vLLM for HPU we support four execution modes, depending on selected HPU PyTorch Bridge backend (via ``PT_HPU_LAZY_MODE`` environment variable), and ``--enforce-eager`` flag. @@ -184,7 +184,7 @@ Currently in vLLM for HPU we support four execution modes, depending on selected Bucketing mechanism -------------------- +~~~~~~~~~~~~~~~~~~~ Intel Gaudi accelerators work best when operating on models with fixed tensor shapes. `Intel Gaudi Graph Compiler `__ is responsible for generating optimized binary code that implements the given model topology on Gaudi. In its default configuration, the produced binary code may be heavily dependent on input and output tensor shapes, and can require graph recompilation when encountering differently shaped tensors within the same topology. While the resulting binaries utilize Gaudi efficiently, the compilation itself may introduce a noticeable overhead in end-to-end execution. In a dynamic inference serving scenario, there is a need to minimize the number of graph compilations and reduce the risk of graph compilation occurring during server runtime. Currently it is achieved by "bucketing" model's forward pass across two dimensions - ``batch_size`` and ``sequence_length``. @@ -233,7 +233,7 @@ As an example, if a request of 3 sequences, with max sequence length of 412 come Bucketing is transparent to a client - padding in sequence length dimension is never returned to the client, and padding in batch dimension does not create new requests. 
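To make the bucketing example above concrete, here is a small illustrative
sketch of padding a request to bucket boundaries. The bucket values below are
hypothetical rather than Gaudi defaults, and the helper is not part of vLLM:

.. code-block:: python

    import bisect

    # Hypothetical bucket boundaries for the two padded dimensions.
    batch_size_buckets = [1, 2, 4, 8, 16, 32, 64]
    seq_len_buckets = [128, 256, 384, 512, 768, 1024]

    def pad_to_bucket(value: int, buckets: list[int]) -> int:
        # Smallest bucket that fits the value; clamp to the largest bucket.
        idx = bisect.bisect_left(buckets, value)
        return buckets[min(idx, len(buckets) - 1)]

    # Under these buckets, the request above (3 sequences, max sequence
    # length 412) would be padded to an effective shape of (4, 512).
    print(pad_to_bucket(3, batch_size_buckets),
          pad_to_bucket(412, seq_len_buckets))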
Warmup ------- +~~~~~~ Warmup is an optional, but highly recommended step occurring before vLLM server starts listening. It executes a forward pass for each bucket with dummy data. The goal is to pre-compile all graphs and not incur any graph compilation overheads within bucket boundaries during server runtime. Each warmup step is logged during vLLM startup: @@ -257,7 +257,7 @@ This example uses the same buckets as in *Bucketing mechanism* section. Each out Compiling all the buckets might take some time and can be turned off with ``VLLM_SKIP_WARMUP=true`` environment variable. Keep in mind that if you do that, you may face graph compilations once executing a given bucket for the first time. It is fine to disable warmup for development, but it's highly recommended to enable it in deployment. HPU Graph capture ------------------ +~~~~~~~~~~~~~~~~~ `HPU Graphs `__ are currently the most performant execution method of vLLM on Intel Gaudi. When HPU Graphs are enabled, execution graphs will be traced (recorded) ahead of time (after performing warmup), to be later replayed during inference, significantly reducing host overheads. Recording can take large amounts of memory, which needs to be taken into account when allocating KV cache. Enabling HPU Graphs will impact the number of available KV cache blocks, but vLLM provides user-configurable variables to control memory management. @@ -321,7 +321,7 @@ Each described step is logged by vLLM server, as follows (negative values corres Recommended vLLM Parameters ---------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~ - We recommend running inference on Gaudi 2 with ``block_size`` of 128 for BF16 data type. Using default values (16, 32) might lead to @@ -333,7 +333,7 @@ Recommended vLLM Parameters If you encounter out-of-memory issues, see troubleshooting section. Environment variables ---------------------- +~~~~~~~~~~~~~~~~~~~~~ **Diagnostic and profiling knobs:** @@ -380,7 +380,7 @@ Additionally, there are HPU PyTorch Bridge environment variables impacting vLLM - ``PT_HPU_ENABLE_LAZY_COLLECTIVES``: required to be ``true`` for tensor parallel inference with HPU Graphs Troubleshooting: Tweaking HPU Graphs -==================================== +------------------------------------ If you experience device out-of-memory issues or want to attempt inference at higher batch sizes, try tweaking HPU Graphs by following diff --git a/docs/source/getting_started/installation.rst b/docs/source/getting_started/installation.rst index e3dbbc9affe66..9b6cb0e80d60e 100644 --- a/docs/source/getting_started/installation.rst +++ b/docs/source/getting_started/installation.rst @@ -21,7 +21,7 @@ You can install vLLM using pip: .. code-block:: console $ # (Recommended) Create a new conda environment. - $ conda create -n myenv python=3.10 -y + $ conda create -n myenv python=3.12 -y $ conda activate myenv $ # Install vLLM with CUDA 12.1. @@ -73,7 +73,7 @@ Another way to access the latest code is to use the docker images: .. code-block:: console $ export VLLM_COMMIT=33f460b17a54acb3b6cc0b03f4a17876cff5eafd # use full commit hash from the main branch - $ docker pull public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:${VLLM_COMMIT} + $ docker pull public.ecr.aws/q9t5s3a7/vllm-ci-postmerge-repo:${VLLM_COMMIT} These docker images are used for CI and testing only, and they are not intended for production use. They will be expired after several days. 
@@ -89,45 +89,24 @@ Build from source
 Python-only build (without compilation)
 ---------------------------------------
 
-If you only need to change Python code, you can simply build vLLM without compilation.
-
-The first step is to install the latest vLLM wheel:
-
-.. code-block:: console
-
-    pip install https://vllm-wheels.s3.us-west-2.amazonaws.com/nightly/vllm-1.0.0.dev-cp38-abi3-manylinux1_x86_64.whl
-
-You can find more information about vLLM's wheels `above <#install-the-latest-code>`_.
-
-After verifying that the installation is successful, you can use `the following script `_:
+If you only need to change Python code, you can build and install vLLM without compilation. Using `pip's ``--editable`` flag `_, changes you make to the code will be reflected when you run vLLM:
 
 .. code-block:: console
 
     $ git clone https://github.com/vllm-project/vllm.git
     $ cd vllm
-    $ python python_only_dev.py
+    $ VLLM_USE_PRECOMPILED=1 pip install --editable .
 
-The script will:
+This will download the latest nightly wheel and use the compiled libraries from there in the install.
 
-* Find the installed vLLM package in the current environment.
-* Copy built files to the current directory.
-* Rename the installed vLLM package.
-* Symbolically link the current directory to the installed vLLM package.
-
-Now, you can edit the Python code in the current directory, and the changes will be reflected when you run vLLM.
-
-Once you have finished editing or want to install another vLLM wheel, you should exit the development environment using `the same script `_ with the ``--quit-dev`` (or ``-q`` for short) flag:
+The ``VLLM_PRECOMPILED_WHEEL_LOCATION`` environment variable can be used instead of ``VLLM_USE_PRECOMPILED`` to specify a custom path or URL to the wheel file. For example, to use the `0.6.3.post1 PyPI wheel `_:
 
 .. code-block:: console
 
-    $ python python_only_dev.py --quit-dev
-
-The ``--quit-dev`` flag will:
-
-* Remove the symbolic link from the current directory to the vLLM package.
-* Restore the original vLLM package from the backup.
+    $ export VLLM_PRECOMPILED_WHEEL_LOCATION=https://files.pythonhosted.org/packages/4a/4c/ee65ba33467a4c0de350ce29fbae39b9d0e7fcd887cc756fa993654d1228/vllm-0.6.3.post1-cp38-abi3-manylinux1_x86_64.whl
+    $ pip install --editable .
 
-If you update the vLLM wheel and rebuild from the source to make further edits, you will need to repeat the `Python-only build <#python-only-build>`_ steps again.
+You can find more information about vLLM's wheels `above <#install-the-latest-code>`_.
 
 .. note::
 
@@ -148,9 +127,13 @@ If you want to modify C++ or CUDA code, you'll need to build vLLM from source. T
 
 .. tip::
     Building from source requires a lot of compilation. If you are building from source repeatedly, it's more efficient to cache the compilation results.
+
+    For example, you can install `ccache `_ using ``conda install ccache`` or ``apt install ccache``. As long as the ``which ccache`` command can find the ``ccache`` binary, it will be used automatically by the build system. After the first build, subsequent builds will be much faster.
+
+    `sccache `_ works similarly to ``ccache``, but has the capability to utilize caching in remote storage environments.
+    The following environment variables can be set to configure the vLLM ``sccache`` remote: ``SCCACHE_BUCKET=vllm-build-sccache SCCACHE_REGION=us-west-2 SCCACHE_S3_NO_CREDENTIALS=1``. We also recommend setting ``SCCACHE_IDLE_TIMEOUT=0``.
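+
+    Whichever build path you choose, a quick sanity check (a sketch, not an
+    official verification step) is to confirm that Python imports vLLM from the
+    location you expect:
+
+    .. code-block:: python
+
+        import vllm
+
+        # For an ``--editable`` install this should point into your git
+        # checkout rather than into site-packages.
+        print(vllm.__file__)
+        print(vllm.__version__)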
+ Use an existing PyTorch installation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/docs/source/index.rst b/docs/source/index.rst index 0692e949f1c77..fd741ea5e9766 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -82,29 +82,39 @@ Documentation serving/openai_compatible_server serving/deploying_with_docker serving/deploying_with_k8s + serving/deploying_with_helm serving/deploying_with_nginx serving/distributed_serving serving/metrics - serving/env_vars - serving/usage_stats serving/integrations serving/tensorizer - serving/compatibility_matrix - serving/faq .. toctree:: :maxdepth: 1 :caption: Models models/supported_models + models/generative_models + models/pooling_models models/adding_model models/enabling_multimodal_inputs - models/engine_args - models/lora - models/vlm - models/structured_outputs - models/spec_decode - models/performance + +.. toctree:: + :maxdepth: 1 + :caption: Usage + + usage/lora + usage/multimodal_inputs + usage/tool_calling + usage/structured_outputs + usage/spec_decode + usage/compatibility_matrix + usage/performance + usage/faq + usage/engine_args + usage/env_vars + usage/usage_stats + usage/disagg_prefill .. toctree:: :maxdepth: 1 @@ -164,6 +174,7 @@ Documentation design/input_processing/model_inputs_index design/kernel/paged_attention design/multimodal/multimodal_index + design/multiprocessing .. For Developers: contributing to the vLLM project diff --git a/docs/source/models/enabling_multimodal_inputs.rst b/docs/source/models/enabling_multimodal_inputs.rst index 49b5285c45590..5c1236e1a8972 100644 --- a/docs/source/models/enabling_multimodal_inputs.rst +++ b/docs/source/models/enabling_multimodal_inputs.rst @@ -3,7 +3,7 @@ Enabling Multimodal Inputs ========================== -This document walks you through the steps to extend a vLLM model so that it accepts :ref:`multi-modal ` inputs. +This document walks you through the steps to extend a vLLM model so that it accepts :ref:`multi-modal inputs `. .. seealso:: :ref:`adding_a_new_model` diff --git a/docs/source/models/generative_models.rst b/docs/source/models/generative_models.rst new file mode 100644 index 0000000000000..fb71185600863 --- /dev/null +++ b/docs/source/models/generative_models.rst @@ -0,0 +1,146 @@ +.. _generative_models: + +Generative Models +================= + +vLLM provides first-class support for generative models, which covers most of LLMs. + +In vLLM, generative models implement the :class:`~vllm.model_executor.models.VllmModelForTextGeneration` interface. +Based on the final hidden states of the input, these models output log probabilities of the tokens to generate, +which are then passed through :class:`~vllm.model_executor.layers.Sampler` to obtain the final text. + +Offline Inference +----------------- + +The :class:`~vllm.LLM` class provides various methods for offline inference. +See :ref:`Engine Arguments ` for a list of options when initializing the model. + +For generative models, the only supported :code:`task` option is :code:`"generate"`. +Usually, this is automatically inferred so you don't have to specify it. + +``LLM.generate`` +^^^^^^^^^^^^^^^^ + +The :class:`~vllm.LLM.generate` method is available to all generative models in vLLM. +It is similar to `its counterpart in HF Transformers `__, +except that tokenization and detokenization are also performed automatically. + +.. 
code-block:: python
+
+    llm = LLM(model="facebook/opt-125m")
+    outputs = llm.generate("Hello, my name is")
+
+    for output in outputs:
+        prompt = output.prompt
+        generated_text = output.outputs[0].text
+        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
+
+You can optionally control the language generation by passing :class:`~vllm.SamplingParams`.
+For example, you can use greedy sampling by setting :code:`temperature=0`:
+
+.. code-block:: python
+
+    llm = LLM(model="facebook/opt-125m")
+    params = SamplingParams(temperature=0)
+    outputs = llm.generate("Hello, my name is", params)
+
+    for output in outputs:
+        prompt = output.prompt
+        generated_text = output.outputs[0].text
+        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
+
+A code example can be found in `examples/offline_inference.py `_.
+
+``LLM.beam_search``
+^^^^^^^^^^^^^^^^^^^
+
+The :class:`~vllm.LLM.beam_search` method implements `beam search `__ on top of :class:`~vllm.LLM.generate`.
+For example, to search using 5 beams and output at most 50 tokens:
+
+.. code-block:: python
+
+    llm = LLM(model="facebook/opt-125m")
+    params = BeamSearchParams(beam_width=5, max_tokens=50)
+    outputs = llm.beam_search([{"prompt": "Hello, my name is"}], params)
+
+    for output in outputs:
+        generated_text = output.sequences[0].text
+        print(f"Generated text: {generated_text!r}")
+
+``LLM.chat``
+^^^^^^^^^^^^
+
+The :class:`~vllm.LLM.chat` method implements chat functionality on top of :class:`~vllm.LLM.generate`.
+In particular, it accepts input similar to `OpenAI Chat Completions API `__
+and automatically applies the model's `chat template `__ to format the prompt.
+
+.. important::
+
+    In general, only instruction-tuned models have a chat template.
+    Base models may perform poorly as they are not trained to respond to the chat conversation.
+
+.. code-block:: python
+
+    llm = LLM(model="meta-llama/Meta-Llama-3-8B-Instruct")
+    conversation = [
+        {
+            "role": "system",
+            "content": "You are a helpful assistant"
+        },
+        {
+            "role": "user",
+            "content": "Hello"
+        },
+        {
+            "role": "assistant",
+            "content": "Hello! How can I assist you today?"
+        },
+        {
+            "role": "user",
+            "content": "Write an essay about the importance of higher education.",
+        },
+    ]
+    outputs = llm.chat(conversation)
+
+    for output in outputs:
+        prompt = output.prompt
+        generated_text = output.outputs[0].text
+        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
+
+A code example can be found in `examples/offline_inference_chat.py `_.
+
+If the model doesn't have a chat template or you want to specify another one,
+you can explicitly pass a chat template:
+
+.. code-block:: python
+
+    from vllm.entrypoints.chat_utils import load_chat_template
+
+    # You can find a list of existing chat templates under `examples/`
+    custom_template = load_chat_template(chat_template="")
+    print("Loaded chat template:", custom_template)
+
+    outputs = llm.chat(conversation, chat_template=custom_template)
+
+Online Inference
+----------------
+
+Our `OpenAI Compatible Server <../serving/openai_compatible_server>`__ can be used for online inference.
+Please click on the above link for more details on how to launch the server.
+
+Completions API
+^^^^^^^^^^^^^^^
+
+Our Completions API is similar to ``LLM.generate`` but only accepts text.
+It is compatible with `OpenAI Completions API `__
+so that you can use the OpenAI client to interact with it.
+A code example can be found in `examples/openai_completion_client.py `_.
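+
+For illustration, here is one hedged way to call the Completions API with the
+official ``openai`` Python client. It assumes a vLLM server is already running
+locally on the default port and serving this model:
+
+.. code-block:: python
+
+    from openai import OpenAI
+
+    # Placeholder endpoint and key for a locally launched vLLM server.
+    client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
+
+    completion = client.completions.create(
+        model="facebook/opt-125m",
+        prompt="Hello, my name is",
+        max_tokens=32,
+    )
+    print(completion.choices[0].text)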
+ +Chat API +^^^^^^^^ + +Our Chat API is similar to ``LLM.chat``, accepting both text and :ref:`multi-modal inputs `. +It is compatible with `OpenAI Chat Completions API `__ +so that you can use OpenAI client to interact with it. +A code example can be found in `examples/openai_chat_completion_client.py `_. diff --git a/docs/source/models/pooling_models.rst b/docs/source/models/pooling_models.rst new file mode 100644 index 0000000000000..4e67677a2767a --- /dev/null +++ b/docs/source/models/pooling_models.rst @@ -0,0 +1,136 @@ +.. _pooling_models: + +Pooling Models +============== + +vLLM also supports pooling models, including embedding, reranking and reward models. + +In vLLM, pooling models implement the :class:`~vllm.model_executor.models.VllmModelForPooling` interface. +These models use a :class:`~vllm.model_executor.layers.Pooler` to extract the final hidden states of the input +before returning them. + +.. note:: + + We currently support pooling models primarily as a matter of convenience. + As shown in the :ref:`Compatibility Matrix `, most vLLM features are not applicable to + pooling models as they only work on the generation or decode stage, so performance may not improve as much. + +Offline Inference +----------------- + +The :class:`~vllm.LLM` class provides various methods for offline inference. +See :ref:`Engine Arguments ` for a list of options when initializing the model. + +For pooling models, we support the following :code:`task` options: + +- Embedding (:code:`"embed"` / :code:`"embedding"`) +- Classification (:code:`"classify"`) +- Sentence Pair Scoring (:code:`"score"`) +- Reward Modeling (:code:`"reward"`) + +The selected task determines the default :class:`~vllm.model_executor.layers.Pooler` that is used: + +- Embedding: Extract only the hidden states corresponding to the last token, and apply normalization. +- Classification: Extract only the hidden states corresponding to the last token, and apply softmax. +- Sentence Pair Scoring: Extract only the hidden states corresponding to the last token, and apply softmax. +- Reward Modeling: Extract all of the hidden states and return them directly. + +When loading `Sentence Transformers `__ models, +we attempt to override the default pooler based on its Sentence Transformers configuration file (:code:`modules.json`). + +You can customize the model's pooling method via the :code:`override_pooler_config` option, +which takes priority over both the model's and Sentence Transformers's defaults. + +``LLM.encode`` +^^^^^^^^^^^^^^ + +The :class:`~vllm.LLM.encode` method is available to all pooling models in vLLM. +It returns the extracted hidden states directly, which is useful for reward models. + +.. code-block:: python + + llm = LLM(model="Qwen/Qwen2.5-Math-RM-72B", task="reward") + (output,) = llm.encode("Hello, my name is") + + data = output.outputs.data + print(f"Data: {data!r}") + +``LLM.embed`` +^^^^^^^^^^^^^ + +The :class:`~vllm.LLM.embed` method outputs an embedding vector for each prompt. +It is primarily designed for embedding models. + +.. code-block:: python + + llm = LLM(model="intfloat/e5-mistral-7b-instruct", task="embed") + (output,) = llm.embed("Hello, my name is") + + embeds = output.outputs.embedding + print(f"Embeddings: {embeds!r} (size={len(embeds)})") + +A code example can be found in `examples/offline_inference_embedding.py `_. + +``LLM.classify`` +^^^^^^^^^^^^^^^^ + +The :class:`~vllm.LLM.classify` method outputs a probability vector for each prompt. +It is primarily designed for classification models. 
+ +.. code-block:: python + + llm = LLM(model="jason9693/Qwen2.5-1.5B-apeach", task="classify") + (output,) = llm.classify("Hello, my name is") + + probs = output.outputs.probs + print(f"Class Probabilities: {probs!r} (size={len(probs)})") + +A code example can be found in `examples/offline_inference_classification.py `_. + +``LLM.score`` +^^^^^^^^^^^^^ + +The :class:`~vllm.LLM.score` method outputs similarity scores between sentence pairs. +It is primarily designed for `cross-encoder models `__. +These types of models serve as rerankers between candidate query-document pairs in RAG systems. + +.. note:: + + vLLM can only perform the model inference component (e.g. embedding, reranking) of RAG. + To handle RAG at a higher level, you should use integration frameworks such as `LangChain `_. + +.. code-block:: python + + llm = LLM(model="BAAI/bge-reranker-v2-m3", task="score") + (output,) = llm.score("What is the capital of France?", + "The capital of Brazil is Brasilia.") + + score = output.outputs.score + print(f"Score: {score}") + +A code example can be found in `examples/offline_inference_scoring.py `_. + +Online Inference +---------------- + +Our `OpenAI Compatible Server <../serving/openai_compatible_server>`__ can be used for online inference. +Please click on the above link for more details on how to launch the server. + +Embeddings API +^^^^^^^^^^^^^^ + +Our Embeddings API is similar to ``LLM.embed``, accepting both text and :ref:`multi-modal inputs `. + +The text-only API is compatible with `OpenAI Embeddings API `__ +so that you can use OpenAI client to interact with it. +A code example can be found in `examples/openai_embedding_client.py `_. + +The multi-modal API is an extension of the `OpenAI Embeddings API `__ +that incorporates `OpenAI Chat Completions API `__, +so it is not part of the OpenAI standard. Please see :ref:`this page ` for more details on how to use it. + +Score API +^^^^^^^^^ + +Our Score API is similar to ``LLM.score``. +Please see `this page <../serving/openai_compatible_server.html#score-api-for-cross-encoder-models>`__ for more details on how to use it. diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst index c5fbb30b24e28..3bef3f3226062 100644 --- a/docs/source/models/supported_models.rst +++ b/docs/source/models/supported_models.rst @@ -3,11 +3,21 @@ Supported Models ================ -vLLM supports a variety of generative and embedding models from `HuggingFace (HF) Transformers `_. -This page lists the model architectures that are currently supported by vLLM. +vLLM supports generative and pooling models across various tasks. +If a model supports more than one task, you can set the task via the :code:`--task` argument. + +For each task, we list the model architectures that have been implemented in vLLM. Alongside each architecture, we include some popular models that use it. -For other models, you can check the :code:`config.json` file inside the model repository. +Loading a Model +^^^^^^^^^^^^^^^ + +HuggingFace Hub ++++++++++++++++ + +By default, vLLM loads models from `HuggingFace (HF) Hub `_. + +To determine whether a given model is supported, you can check the :code:`config.json` file inside the HF repository. If the :code:`"architectures"` field contains a model architecture listed below, then it should be supported in theory. .. tip:: @@ -17,38 +27,57 @@ If the :code:`"architectures"` field contains a model architecture listed below, from vllm import LLM - llm = LLM(model=...) 
diff --git a/docs/source/models/supported_models.rst b/docs/source/models/supported_models.rst
index c5fbb30b24e28..3bef3f3226062 100644
--- a/docs/source/models/supported_models.rst
+++ b/docs/source/models/supported_models.rst
@@ -3,11 +3,21 @@
 Supported Models
 ================
 
-vLLM supports a variety of generative and embedding models from `HuggingFace (HF) Transformers `_.
-This page lists the model architectures that are currently supported by vLLM.
+vLLM supports generative and pooling models across various tasks.
+If a model supports more than one task, you can set the task via the :code:`--task` argument.
+
+For each task, we list the model architectures that have been implemented in vLLM.
 Alongside each architecture, we include some popular models that use it.
 
-For other models, you can check the :code:`config.json` file inside the model repository.
+Loading a Model
+^^^^^^^^^^^^^^^
+
+HuggingFace Hub
++++++++++++++++
+
+By default, vLLM loads models from `HuggingFace (HF) Hub `_.
+
+To determine whether a given model is supported, you can check the :code:`config.json` file inside the HF repository.
 If the :code:`"architectures"` field contains a model architecture listed below, then it should be supported in theory.
 
 .. tip::
@@ -17,38 +27,57 @@ If the :code:`"architectures"` field contains a model architecture listed below,
 
     from vllm import LLM
 
-    llm = LLM(model=...)  # Name or path of your model
+    # For generative models (task=generate) only
+    llm = LLM(model=..., task="generate")  # Name or path of your model
+    output = llm.generate("Hello, my name is")
+    print(output)
 
-    If vLLM successfully generates text, it indicates that your model is supported.
+    # For pooling models (task={embed,classify,reward}) only
+    llm = LLM(model=..., task="embed")  # Name or path of your model
+    output = llm.encode("Hello, my name is")
+    print(output)
+
+    If vLLM successfully returns text (for generative models) or hidden states (for pooling models), it indicates that your model is supported.
     Otherwise, please refer to :ref:`Adding a New Model ` and :ref:`Enabling Multimodal Inputs `
     for instructions on how to implement your model in vLLM.
     Alternatively, you can `open an issue on GitHub `_ to request vLLM support.
 
-.. note::
-    To use models from `ModelScope `_ instead of HuggingFace Hub, set an environment variable:
+ModelScope
+++++++++++
 
-    .. code-block:: shell
+To use models from `ModelScope `_ instead of HuggingFace Hub, set an environment variable:
 
-        $ export VLLM_USE_MODELSCOPE=True
+.. code-block:: shell
 
-    And use with :code:`trust_remote_code=True`.
+   $ export VLLM_USE_MODELSCOPE=True
 
-    .. code-block:: python
+And use with :code:`trust_remote_code=True`.
 
-        from vllm import LLM
+.. code-block:: python
 
-        llm = LLM(model=..., revision=..., trust_remote_code=True)  # Name or path of your model
-        output = llm.generate("Hello, my name is")
-        print(output)
+    from vllm import LLM
+
+    llm = LLM(model=..., revision=..., task=..., trust_remote_code=True)
+
+    # For generative models (task=generate) only
+    output = llm.generate("Hello, my name is")
+    print(output)
 
-Text-only Language Models
-^^^^^^^^^^^^^^^^^^^^^^^^^
+    # For pooling models (task={embed,classify,reward}) only
+    output = llm.encode("Hello, my name is")
+    print(output)
 
-Text Generation
----------------
+List of Text-only Language Models
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Generative Models
++++++++++++++++++
+
+See :ref:`this page ` for more information on how to use generative models.
+
+Text Generation (``--task generate``)
+-------------------------------------
 
 .. list-table::
   :widths: 25 25 50 5 5
@@ -89,9 +118,9 @@ Text Generation
     - :code:`THUDM/chatglm2-6b`, :code:`THUDM/chatglm3-6b`, etc.
     - ✅︎
     - ✅︎
-  * - :code:`CohereForCausalLM`
+  * - :code:`CohereForCausalLM`, :code:`Cohere2ForCausalLM`
    - Command-R
-    - :code:`CohereForAI/c4ai-command-r-v01`, etc.
+    - :code:`CohereForAI/c4ai-command-r-v01`, :code:`CohereForAI/c4ai-command-r7b-12-2024`, etc.
     - ✅︎
     - ✅︎
   * - :code:`DbrxForCausalLM`
@@ -128,7 +157,7 @@ Text Generation
     - FalconMamba
     - :code:`tiiuae/falcon-mamba-7b`, :code:`tiiuae/falcon-mamba-7b-instruct`, etc.
     - ✅︎
-    -
+    - ✅︎
   * - :code:`GemmaForCausalLM`
     - Gemma
     - :code:`google/gemma-2b`, :code:`google/gemma-7b`, etc.
@@ -139,6 +168,11 @@ Text Generation
     - :code:`google/gemma-2-9b`, :code:`google/gemma-2-27b`, etc.
     - ✅︎
     - ✅︎
+  * - :code:`GlmForCausalLM`
+    - GLM-4
+    - :code:`THUDM/glm-4-9b-chat-hf`, etc.
+    - ✅︎
+    - ✅︎
   * - :code:`GPT2LMHeadModel`
     - GPT-2
     - :code:`gpt2`, :code:`gpt2-xl`, etc.
@@ -169,6 +203,11 @@ Text Generation
     - :code:`ibm-granite/granite-3.0-1b-a400m-base`, :code:`ibm-granite/granite-3.0-3b-a800m-instruct`, :code:`ibm/PowerMoE-3b`, etc.
     - ✅︎
     - ✅︎
+  * - :code:`GritLM`
+    - GritLM
+    - :code:`parasail-ai/GritLM-7B-vllm`.
+    - ✅︎
+    - ✅︎
   * - :code:`InternLMForCausalLM`
     - InternLM
     - :code:`internlm/internlm-7b`, :code:`internlm/internlm-chat-7b`, etc.
@@ -177,7 +216,7 @@ Text Generation * - :code:`InternLM2ForCausalLM` - InternLM2 - :code:`internlm/internlm2-7b`, :code:`internlm/internlm2-chat-7b`, etc. - - + - ✅︎ - ✅︎ * - :code:`JAISLMHeadModel` - Jais @@ -188,7 +227,7 @@ Text Generation - Jamba - :code:`ai21labs/AI21-Jamba-1.5-Large`, :code:`ai21labs/AI21-Jamba-1.5-Mini`, :code:`ai21labs/Jamba-v0.1`, etc. - ✅︎ - - + - ✅︎ * - :code:`LlamaForCausalLM` - Llama 3.1, Llama 3, Llama 2, LLaMA, Yi - :code:`meta-llama/Meta-Llama-3.1-405B-Instruct`, :code:`meta-llama/Meta-Llama-3.1-70B`, :code:`meta-llama/Meta-Llama-3-70B-Instruct`, :code:`meta-llama/Llama-2-70b-hf`, :code:`01-ai/Yi-34B`, etc. @@ -198,7 +237,7 @@ Text Generation - Mamba - :code:`state-spaces/mamba-130m-hf`, :code:`state-spaces/mamba-790m-hf`, :code:`state-spaces/mamba-2.8b-hf`, etc. - - - + - ✅︎ * - :code:`MiniCPMForCausalLM` - MiniCPM - :code:`openbmb/MiniCPM-2B-sft-bf16`, :code:`openbmb/MiniCPM-2B-dpo-bf16`, :code:`openbmb/MiniCPM-S-1B-sft`, etc. @@ -323,8 +362,24 @@ Text Generation .. note:: Currently, the ROCm version of vLLM supports Mistral and Mixtral only for context lengths up to 4096. -Text Embedding --------------- +Pooling Models +++++++++++++++ + +See :ref:`this page ` for more information on how to use pooling models. + +.. important:: + Since some model architectures support both generative and pooling tasks, + you should explicitly specify the task type to ensure that the model is used in pooling mode instead of generative mode. + +Text Embedding (``--task embed``) +--------------------------------- + +Any text generation model can be converted into an embedding model by passing :code:`--task embed`. + +.. note:: + To get the best results, you should use pooling models that are specifically trained as such. + +The following table lists those that are tested in vLLM. .. list-table:: :widths: 25 25 50 5 5 @@ -345,6 +400,11 @@ Text Embedding - :code:`BAAI/bge-multilingual-gemma2`, etc. - - ✅︎ + * - :code:`GritLM` + - GritLM + - :code:`parasail-ai/GritLM-7B-vllm`. + - ✅︎ + - ✅︎ * - :code:`LlamaModel`, :code:`LlamaForCausalLM`, :code:`MistralModel`, etc. - Llama-based - :code:`intfloat/e5-mistral-7b-instruct`, etc. @@ -352,7 +412,7 @@ Text Embedding - ✅︎ * - :code:`Qwen2Model`, :code:`Qwen2ForCausalLM` - Qwen2-based - - :code:`ssmits/Qwen2-7B-Instruct-embed-base`, :code:`Alibaba-NLP/gte-Qwen2-7B-instruct` (see note), etc. + - :code:`ssmits/Qwen2-7B-Instruct-embed-base` (see note), :code:`Alibaba-NLP/gte-Qwen2-7B-instruct` (see note), etc. - ✅︎ - ✅︎ * - :code:`RobertaModel`, :code:`RobertaForMaskedLM` @@ -366,12 +426,9 @@ Text Embedding - - -.. important:: - Some model architectures support both generation and embedding tasks. - In this case, you have to pass :code:`--task embedding` to run the model in embedding mode. - -.. tip:: - You can override the model's pooling method by passing :code:`--override-pooler-config`. +.. note:: + :code:`ssmits/Qwen2-7B-Instruct-embed-base` has an improperly defined Sentence Transformers config. + You should manually set mean pooling by passing :code:`--override-pooler-config '{"pooling_type": "MEAN"}'`. .. note:: Unlike base Qwen2, :code:`Alibaba-NLP/gte-Qwen2-7B-instruct` uses bi-directional attention. @@ -380,8 +437,8 @@ Text Embedding On the other hand, its 1.5B variant (:code:`Alibaba-NLP/gte-Qwen2-1.5B-instruct`) uses causal attention despite being described otherwise on its model card. -Reward Modeling ---------------- +Reward Modeling (``--task reward``) +----------------------------------- .. 
list-table:: :widths: 25 25 50 5 5 @@ -392,17 +449,23 @@ Reward Modeling - Example HF Models - :ref:`LoRA ` - :ref:`PP ` + * - :code:`LlamaForCausalLM` + - Llama-based + - :code:`peiyi9979/math-shepherd-mistral-7b-prm`, etc. + - ✅︎ + - ✅︎ * - :code:`Qwen2ForRewardModel` - Qwen2-based - :code:`Qwen/Qwen2.5-Math-RM-72B`, etc. - ✅︎ - ✅︎ -.. note:: - As an interim measure, these models are supported in both offline and online inference via Embeddings API. +.. important:: + For process-supervised reward models such as :code:`peiyi9979/math-shepherd-mistral-7b-prm`, the pooling config should be set explicitly, + e.g.: :code:`--override-pooler-config '{"pooling_type": "STEP", "step_tag_id": 123, "returned_token_ids": [456, 789]}'`. -Classification ---------------- +Classification (``--task classify``) +------------------------------------ .. list-table:: :widths: 25 25 50 5 5 @@ -419,11 +482,8 @@ Classification - ✅︎ - ✅︎ -.. note:: - As an interim measure, these models are supported in both offline and online inference via Embeddings API. - -Sentence Pair Scoring ---------------------- +Sentence Pair Scoring (``--task score``) +---------------------------------------- .. list-table:: :widths: 25 25 50 5 5 @@ -450,11 +510,10 @@ Sentence Pair Scoring - - -.. note:: - These models are supported in both offline and online inference via Score API. +.. _supported_mm_models: -Multimodal Language Models -^^^^^^^^^^^^^^^^^^^^^^^^^^ +List of Multimodal Language Models +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The following modalities are supported depending on the model: @@ -471,13 +530,18 @@ On the other hand, modalities separated by :code:`/` are mutually exclusive. - e.g.: :code:`T / I` means that the model supports text-only and image-only inputs, but not text-with-image inputs. -.. _supported_vlms: +See :ref:`this page ` on how to pass multi-modal inputs to the model. + +Generative Models ++++++++++++++++++ -Text Generation ---------------- +See :ref:`this page ` for more information on how to use generative models. + +Text Generation (``--task generate``) +------------------------------------- .. list-table:: - :widths: 25 25 15 25 5 5 + :widths: 25 25 15 20 5 5 5 :header-rows: 1 * - Architecture @@ -486,157 +550,216 @@ Text Generation - Example HF Models - :ref:`LoRA ` - :ref:`PP ` + - V1 * - :code:`AriaForConditionalGeneration` - Aria - T + I - :code:`rhymes-ai/Aria` - - ✅︎ + - * - :code:`Blip2ForConditionalGeneration` - BLIP-2 - T + I\ :sup:`E` - :code:`Salesforce/blip2-opt-2.7b`, :code:`Salesforce/blip2-opt-6.7b`, etc. - - ✅︎ + - * - :code:`ChameleonForConditionalGeneration` - Chameleon - T + I - :code:`facebook/chameleon-7b` etc. - - ✅︎ + - * - :code:`FuyuForCausalLM` - Fuyu - T + I - :code:`adept/fuyu-8b` etc. - - ✅︎ + - * - :code:`ChatGLMModel` - GLM-4V - T + I - :code:`THUDM/glm-4v-9b` etc. - ✅︎ - ✅︎ + - * - :code:`H2OVLChatModel` - H2OVL - T + I\ :sup:`E+` - :code:`h2oai/h2ovl-mississippi-800m`, :code:`h2oai/h2ovl-mississippi-2b`, etc. - - ✅︎ + - * - :code:`Idefics3ForConditionalGeneration` - Idefics3 - T + I - :code:`HuggingFaceM4/Idefics3-8B-Llama3` etc. - ✅︎ + - - * - :code:`InternVLChatModel` - - InternVL2 + - InternVL 2.5, Mono-InternVL, InternVL 2.0 - T + I\ :sup:`E+` - - :code:`OpenGVLab/Mono-InternVL-2B`, :code:`OpenGVLab/InternVL2-4B`, :code:`OpenGVLab/InternVL2-8B`, etc. + - :code:`OpenGVLab/InternVL2_5-4B`, :code:`OpenGVLab/Mono-InternVL-2B`, :code:`OpenGVLab/InternVL2-4B`, etc. 
- - ✅︎ + - ✅︎ * - :code:`LlavaForConditionalGeneration` - LLaVA-1.5 - T + I\ :sup:`E+` - - :code:`llava-hf/llava-1.5-7b-hf`, :code:`llava-hf/llava-1.5-13b-hf`, etc. + - :code:`llava-hf/llava-1.5-7b-hf`, :code:`TIGER-Lab/Mantis-8B-siglip-llama3` (see note), etc. - - ✅︎ + - ✅︎ * - :code:`LlavaNextForConditionalGeneration` - LLaVA-NeXT - T + I\ :sup:`E+` - :code:`llava-hf/llava-v1.6-mistral-7b-hf`, :code:`llava-hf/llava-v1.6-vicuna-7b-hf`, etc. - - ✅︎ + - * - :code:`LlavaNextVideoForConditionalGeneration` - LLaVA-NeXT-Video - T + V - :code:`llava-hf/LLaVA-NeXT-Video-7B-hf`, etc. - - ✅︎ + - * - :code:`LlavaOnevisionForConditionalGeneration` - LLaVA-Onevision - T + I\ :sup:`+` + V\ :sup:`+` - :code:`llava-hf/llava-onevision-qwen2-7b-ov-hf`, :code:`llava-hf/llava-onevision-qwen2-0.5b-ov-hf`, etc. - - ✅︎ + - * - :code:`MiniCPMV` - MiniCPM-V - T + I\ :sup:`E+` - :code:`openbmb/MiniCPM-V-2` (see note), :code:`openbmb/MiniCPM-Llama3-V-2_5`, :code:`openbmb/MiniCPM-V-2_6`, etc. - ✅︎ - ✅︎ + - * - :code:`MllamaForConditionalGeneration` - Llama 3.2 - T + I\ :sup:`+` - :code:`meta-llama/Llama-3.2-90B-Vision-Instruct`, :code:`meta-llama/Llama-3.2-11B-Vision`, etc. - - + - * - :code:`MolmoForCausalLM` - Molmo - T + I - :code:`allenai/Molmo-7B-D-0924`, :code:`allenai/Molmo-72B-0924`, etc. - - ✅︎ + - ✅︎ * - :code:`NVLM_D_Model` - NVLM-D 1.0 - T + I\ :sup:`E+` - :code:`nvidia/NVLM-D-72B`, etc. - - ✅︎ + - ✅︎ * - :code:`PaliGemmaForConditionalGeneration` - - PaliGemma + - PaliGemma, PaliGemma 2 - T + I\ :sup:`E` - - :code:`google/paligemma-3b-pt-224`, :code:`google/paligemma-3b-mix-224`, etc. + - :code:`google/paligemma-3b-pt-224`, :code:`google/paligemma-3b-mix-224`, :code:`google/paligemma2-3b-ft-docci-448`, etc. - - ✅︎ + - * - :code:`Phi3VForCausalLM` - Phi-3-Vision, Phi-3.5-Vision - T + I\ :sup:`E+` - :code:`microsoft/Phi-3-vision-128k-instruct`, :code:`microsoft/Phi-3.5-vision-instruct` etc. - - ✅︎ + - ✅︎ * - :code:`PixtralForConditionalGeneration` - Pixtral - T + I\ :sup:`+` - :code:`mistralai/Pixtral-12B-2409`, :code:`mistral-community/pixtral-12b` etc. - - ✅︎ + - ✅︎ * - :code:`QWenLMHeadModel` - Qwen-VL - T + I\ :sup:`E+` - :code:`Qwen/Qwen-VL`, :code:`Qwen/Qwen-VL-Chat`, etc. - ✅︎ - ✅︎ + - * - :code:`Qwen2AudioForConditionalGeneration` - Qwen2-Audio - T + A\ :sup:`+` - :code:`Qwen/Qwen2-Audio-7B-Instruct` - - ✅︎ + - * - :code:`Qwen2VLForConditionalGeneration` - Qwen2-VL - T + I\ :sup:`E+` + V\ :sup:`E+` - :code:`Qwen/Qwen2-VL-2B-Instruct`, :code:`Qwen/Qwen2-VL-7B-Instruct`, :code:`Qwen/Qwen2-VL-72B-Instruct`, etc. - ✅︎ - ✅︎ + - * - :code:`UltravoxModel` - Ultravox - T + A\ :sup:`E+` - :code:`fixie-ai/ultravox-v0_3` - - ✅︎ + - | :sup:`E` Pre-computed embeddings can be inputted for this modality. | :sup:`+` Multiple items can be inputted per text prompt for this modality. +.. important:: + To enable multiple multi-modal items per text prompt, you have to set :code:`limit_mm_per_prompt` (offline inference) + or :code:`--limit-mm-per-prompt` (online inference). For example, to enable passing up to 4 images per text prompt: + + .. code-block:: python + + llm = LLM( + model="Qwen/Qwen2-VL-7B-Instruct", + limit_mm_per_prompt={"image": 4}, + ) + + .. code-block:: bash + + vllm serve Qwen/Qwen2-VL-7B-Instruct --limit-mm-per-prompt image=4 + .. note:: vLLM currently only supports adding LoRA to the language backbone of multimodal models. +.. 
note::
+    To use :code:`TIGER-Lab/Mantis-8B-siglip-llama3`, you have to install their GitHub repo (:code:`pip install git+https://github.com/TIGER-AI-Lab/Mantis.git`)
+    and pass :code:`--hf_overrides '{"architectures": ["MantisForConditionalGeneration"]}'` when running vLLM.
+
 .. note::
   The official :code:`openbmb/MiniCPM-V-2` doesn't work yet, so we need to use a fork (:code:`HwwwH/MiniCPM-V-2`) for now.
   For more details, please see: https://github.com/vllm-project/vllm/pull/4087#issuecomment-2250397630
 
-Multimodal Embedding
---------------------
+Pooling Models
+++++++++++++++
+
+See :ref:`this page ` for more information on how to use pooling models.
+
+.. important::
+    Since some model architectures support both generative and pooling tasks,
+    you should explicitly specify the task type to ensure that the model is used in pooling mode instead of generative mode.
+
+Text Embedding (``--task embed``)
+---------------------------------
+
+Any text generation model can be converted into an embedding model by passing :code:`--task embed`.
+
+.. note::
+    To get the best results, you should use pooling models that are specifically trained as such.
+
+The following table lists those that are tested in vLLM.
 
 .. list-table::
   :widths: 25 25 15 25 5 5
@@ -667,12 +790,7 @@ Multimodal Embedding
    -
    - ✅︎
 
-.. important::
-    Some model architectures support both generation and embedding tasks.
-    In this case, you have to pass :code:`--task embedding` to run the model in embedding mode.
-
-.. tip::
-    You can override the model's pooling method by passing :code:`--override-pooler-config`.
+----
 
 Model Support Policy
 =====================
 
@@ -683,6 +801,9 @@ At vLLM, we are committed to facilitating the integration and support of third-p
 
 2. **Best-Effort Consistency**: While we aim to maintain a level of consistency between the models implemented in vLLM and other frameworks like transformers, complete alignment is not always feasible. Factors like acceleration techniques and the use of low-precision computations can introduce discrepancies. Our commitment is to ensure that the implemented models are functional and produce sensible results.
 
+.. tip::
+    When comparing the output of :code:`model.generate` from HuggingFace Transformers with the output of :code:`llm.generate` from vLLM, note that the former reads the model's generation config file (i.e., `generation_config.json `__) and applies the default parameters for generation, while the latter only uses the parameters passed to the function. Ensure all sampling parameters are identical when comparing outputs; a minimal sketch of such a comparison follows this list.
+
 3. **Issue Resolution and Model Updates**: Users are encouraged to report any bugs or issues they encounter with third-party models. Proposed fixes should be submitted via PRs, with a clear explanation of the problem and the rationale behind the proposed solution. If a fix for one model impacts another, we rely on the community to highlight and address these cross-model dependencies. Note: for bugfix PRs, it is good etiquette to inform the original author to seek their feedback.
 
 4. **Monitoring and Updates**: Users interested in specific models should monitor the commit history for those models (e.g., by tracking changes in the main/vllm/model_executor/models directory). This proactive approach helps users stay informed about updates and changes that may affect the models they use.
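As flagged in the tip above, a fair comparison between HuggingFace Transformers and vLLM requires pinning the sampling parameters on both sides. The following is an editorial sketch, not part of the patch; the model name and the budget of 32 new tokens are illustrative assumptions.

.. code-block:: python

    from transformers import AutoModelForCausalLM, AutoTokenizer
    from vllm import LLM, SamplingParams

    model_id = "facebook/opt-125m"  # illustrative model choice

    # HF Transformers: generate() silently applies generation_config.json
    # defaults, so pin greedy decoding and the token budget explicitly.
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    hf_model = AutoModelForCausalLM.from_pretrained(model_id)
    inputs = tokenizer("Hello, my name is", return_tensors="pt")
    hf_ids = hf_model.generate(**inputs, do_sample=False, max_new_tokens=32)
    print(tokenizer.decode(hf_ids[0], skip_special_tokens=True))

    # vLLM: only the parameters passed here are used, so mirror them.
    llm = LLM(model=model_id)
    outputs = llm.generate(
        "Hello, my name is",
        SamplingParams(temperature=0.0, max_tokens=32),  # greedy, same budget
    )
    print(outputs[0].outputs[0].text)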
diff --git a/docs/source/quantization/bnb.rst b/docs/source/quantization/bnb.rst
index 682938cc63d48..84f805bb60c2a 100644
--- a/docs/source/quantization/bnb.rst
+++ b/docs/source/quantization/bnb.rst
@@ -11,7 +11,7 @@ Below are the steps to utilize BitsAndBytes with vLLM.
 
 .. code-block:: console
 
-   $ pip install bitsandbytes>=0.44.0
+   $ pip install bitsandbytes>=0.45.0
 
 vLLM reads the model's config file and supports both in-flight quantization and pre-quantized checkpoints.
diff --git a/docs/source/quantization/fp8.rst b/docs/source/quantization/fp8.rst
index aacd07a34ad46..4dbf8e9d346e1 100644
--- a/docs/source/quantization/fp8.rst
+++ b/docs/source/quantization/fp8.rst
@@ -45,7 +45,7 @@ To produce performant FP8 quantized models with vLLM, you'll need to install the
 
 .. code-block:: console
 
-   $ pip install llmcompressor==0.1.0
+   $ pip install llmcompressor
 
 Quantization Process
 --------------------
diff --git a/docs/source/quantization/int8.rst b/docs/source/quantization/int8.rst
index 04fa308449507..aa5b251becb1c 100644
--- a/docs/source/quantization/int8.rst
+++ b/docs/source/quantization/int8.rst
@@ -19,7 +19,7 @@ To use INT8 quantization with vLLM, you'll need to install the `llm-compressor <
 
 .. code-block:: console
 
-   $ pip install llmcompressor==0.1.0
+   $ pip install llmcompressor
 
 Quantization Process
 --------------------
@@ -142,4 +142,4 @@ Best Practices
 
 Troubleshooting and Support
 ---------------------------
 
-If you encounter any issues or have feature requests, please open an issue on the ``vllm-project/llm-compressor`` GitHub repository.
\ No newline at end of file
+If you encounter any issues or have feature requests, please open an issue on the ``vllm-project/llm-compressor`` GitHub repository.
diff --git a/docs/source/serving/architecture_helm_deployment.png b/docs/source/serving/architecture_helm_deployment.png
new file mode 100644
index 0000000000000000000000000000000000000000..8f9ca29795ffe442c2d22a5ba79c3896e36eb2eb
GIT binary patch
literal 991484
[991,484 bytes of base85-encoded binary data omitted: this hunk adds the Helm deployment architecture diagram image]
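Editorial aside on the BitsAndBytes bump above: bnb.rst notes that vLLM supports both in-flight quantization and pre-quantized checkpoints. A minimal sketch of the in-flight path, assuming the ``quantization``/``load_format`` arguments accepted by vLLM at the time of this patch (the model name is illustrative):

.. code-block:: python

    from vllm import LLM

    # Quantize an unquantized checkpoint on the fly with BitsAndBytes
    # (requires the bitsandbytes>=0.45.0 install from the diff above).
    llm = LLM(
        model="huggyllama/llama-7b",  # illustrative model
        quantization="bitsandbytes",
        load_format="bitsandbytes",
    )
    print(llm.generate("Hello, my name is")[0].outputs[0].text)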
z&Q!+CfA-RV2F7Zy8>>HB^+$W&57~?wQQ0^GUzsySmM;@Dya}=HJIy->gx2p7jVF&| z?FBQJC>R`CbXdjLV&Tq+;&MLQKkwGKfBpIUY>fhUgX(Xt|3^{(AAQILFMuI=CFJCx z^@o>8dr*zbBhBI6SPU_A(}hYZnLb-i8T+S(DJ?tc`SV-Xujzb6 z1(mP6LX2S_@#u8`#7$6**`ZnY(BG$!Fww?PW3x;tfLcGZ5pi=U>FY%(-pvcivLCpG z9<3Z%!I1Qz1E5=0uncxdl42We2sMOiM=As<$CM?Pch>5^&lDyVHIM(9jQr^(v zXAGmC7E8GeMPMF4I3TwGsxZTpEifqV)|Q-uY`p0VDG#uywXw_=YgM^R?U`` zT*u6O7_t{-%ZV=Vpl|BAb&??!Es)PdKa#?01q#dMTXcYAa=bnfL1QeF(j~IOE*J`+ z$HLJ>jbSe;D%E2~#VYAlC?=ULF&IJLRD__?i#G6%384T;E7PD=&Ud?vQZ}In6}RS= zq)}4U8W)W-5GhEl!6Uj7wE;L)-lrJ} z6~@v;PU~FM7#tHzS+g@(u13`op-!#BZbQjmslj&z+iB(c4O8U^Cp;5Yheso9L_)TLz2EwmIbEd~9(#rM7k9}x#y`&O?q9E3wXbVu z*p4IMX!3s=4X$VTlTJx||Mgs9Sqr^|VhHZY=p~=6;)x1+KS*QIrN)Q9ZS;T9nw)V& z=yIpw3e7Cx4hKUkHR^Rub;B8!$)@}hF<%9MqzE$3gRtX$}!oPmYsc9}iiZt`=rAiOSerfQCj?~PhLbG=+< zTuZ2pZt4-^=NwKnu?Y%nNe~kzUj5diGrc{rOh9?D&lE1<}I-%iV1TP5szH33F%tP87s>dyPqB3mNo%cQLkZR;D z&26pukkxeND486)?)P-^>1BM&^be!`qt5&)dHeA2!2NVw$M%qM>fwIt?r{{tzR0HfS*4bd;2Ybuv@Z>?k2m=7^-M+1k1@nJv-gy2Wh?JSd}oMW_ktcZpNh|Xlo4c^6xt`R*{rM zLAW|@I8lF75uitRbMhrBqkqu^s7hYE32kQ@2PObw2vrVXwQ7ujvX+R)DqZ>^4yM&a z??I3EE9|dlmA@Fr8}wo8sCKAUi;Qlj4>?lhdyKJ9t?sSbpg|8sYv-m6TF1mPXFwOV zA>Go5QbQG^yIR?lhI)Oy@wnhZIuoCYJL`cCz>p(Vgx3PtB@^g|fdw*1?A`4md3tlB zQN}huwAVm@^|i?A;mkhuRLO~ zt5jr3cB3l$(lK;JBo1mk4y&>YxjA|8nwF{XNGN?=>yHHplg(D)o1q?&_JF z1xY9Kt0QvrECJ17Bwq0AzIVh8HcL(fuVxp2U!vop@+Nt-upy4lf!B;aAU5~zmL-Y38VMDYxiZyT_%|RdHZZ2N?yJnTvhfQBQ^lq6mm*kZ%!i0ioik zUF+h|AK{#EX&K$R(N67RLR|?x>wLzs?|7gdkmJ_29i>@*0hUNKSYFRQkgqF#HGzuY zDG52>Twp*V`ER@kKn62u)eC8{KIXZIWWaq0o(LH6$z<7i&I3UKxG4SBBsG=@`xA)y z4|zmOBJ#F{Q!N=hamxjfOSuL>%@j3g4q;=aX(RvUOIt3;OD^#9%$v~zw-N(TFcfpx z1;`58bW+7k$paNF_QMScPD5#&S`EFYwdBJ;#9RqMBD`^hT6bk=s$VDcU2AGv$`^}y zzcr&5pen6SQaL~^xTAy<{BZ&h0mG5n8(wpCs8Kj~^RE8YDrQD;489$48F z9p>%G5GT}%mBu{5e+-twz@P0;ndu*qp!R2*hRpW{f2xzz!fMiZ8Fs1Vbq7iM3%m-J zB92!VMdA&7yd^|}=YSc0)I=U8N3d`pUqPeKpxwP7-WI@ueW<)xTCuNhRio$B)U-Tf z-JV}xg$WysNE;yt4u;#_HD&mgl|kL+p%xvQA1po`H#Th9ZVVcej*p{QeYwIlez@FS z{7!e?i~`7k0e{6%cq{c8;TZ8PO{NP5#`f3R86%nWjL;CS>?VeyIUaBqT7Zz?4f@{d zEyitTQf!=lt66b;#Kh}65y5ovwdcLWsOjC~r9DJ5 zQ<6Z>C*8=VjV}M?!+#t^`{qAR{l|-j?9Y}GAJ!wxE!%Hl16UnkGRAdT;~%a~nc4TCz7%nBHJ|qj zQkzUeXfUAeucB9YK%VUttm&cnqlxMQ$u0ad4-BT>PE9C1#Q(T@?F;(2u)#JD;hZ_3>m=%eV-&@m%m zCz*N{zt;@a7UfA6um)A{Jq>_A6@6DUpqykY((uF`ex1By?6#ilh^!qqZN0urk}LWf zr_p+EarJ%Jyjsf)x$cHKJ6~_6oh4Y1Rv+xht2bV`9PdQz4XcJN@2_J#`CO4NJQp@D zp5z8WlfI#i>t2}EMJGQV-1d8;esXR{-r2ta)Yu}winTq!>4xOjp)6;P{%B78=NICC z;WV22Z8{(Q#gRJmPpefvQ6%l>8w^G!AJ1Ri_StP%2;>)(DBqYh5TNfl*L}MmULo=MSn=>`#gXdGziA zZRcbYSPVB&MBwl9d{IZorP1hk@o^LbZIP@bUIzpr4HS8l?J2iS~4&;!S4EDX^P zcqnI3C3ASBnZ_}9!U=kaVHwjCz}706^Bf9{OV(47Eu;aC3Q|Nj8HOx^mJH%w*qfse zw01r%3Chh+6fnR3_Rx+k&qgo=*Ef+y>Xq*oRL>tA#Y>6LyD0e>(c{9wG9&uPVDhng zR52Z|Txxc9l3FI2m}n^#aysSLsq9Iy_EU@yk=NoD(o?rzw56_z)x??;ph2D~0@(tX zMu<>~Cjk-q<7=Gg7b!DhemscB0db{PZT1;V+7lrW zrZRE)T@s2DnsZ@SEc~S8iiWg$od}z3=up(}e@jn|5;kXB@vDOO)G4-<_R=PUwF@c_ znvD6_KSVOm7kNJR&cFM}-wU>|u&^4szSye&=fnSJIps_2x>?YVu-+{?e6e17>b)_x0XQZSISvEF#%@_87usy~~ATUzw{q@XWpwb}7CkNZB% zI}$h&-5ade2FHGX-2su#ws&Dg#7%t3rOph0p9C%3|2h^=5vBnUy2^E>MwG>7oc^?U z+E=}ulpU0o^_Uy>Y`7lfIMKHrP0QV)@#itS+!5M39&}e;c8!0ze?&NV+I}!5?eA-H zM3hC?$Ub|;O#XfAM8vnR!SD4ue{rl#JX@p;C0(FGl8YvHVwMr8Y?(xbo|tlJ!lg02 zcO+cv8U5p$EjOYTeV2TmTd5(>L`*Y$^13JL!(;b0T_}JDpndP;+^RKG!^jF;r6}Yn@>0YyuAqc4Ygz-RBWiz8FPNeC z4u#1y3n8jh`M2L8$DIkB?9xU3(?fe~HZN<5gLuBn;7BPUaD5GW~VSkcC$!@#3 zrVNB&P>w=e4lXbm^i}+Mb?7-UZveV4U5v`g^#7{|`)x4SX(PR$L|8c}u#=-lgCi!9vQ{MQdY%V+;z99GD zKl*Oh1X;Dmg%NZxo!lx|;_-OxuR4F6X&xGZ|B-a0@U}IuYi3t}I{}hC*awZ@Y-Jv~ z_k)L0yxwK`+}r5*j4*sBl@DSpk;*=t 
zcf;SAq>MAynBxm{1RLQcuu5sm**Kzma0(eltYn_bZeZx?G&@r*@F~5E$Qr$J&;&Y2 zWNKn0qIsmao#PIYs0@4hn4Js&G=&U^s#xh7{XC$BUnPZm=2&mhm7-lyc&1#w6C;f_TCR4p10Ukh{RMk#s zq=hI-Szk9uD&YaxYtxJ-Ny`ul@zi$9FiH3^i~8fGYg%>I7z95`Q}NePRV%vWHx0D2 zmPQ}tcMS+M;BpwBiaDca6T~Id_rph_37#lP{FV~C#?(x&SW?(0-Y;$YJ zwvqB5^)LyOgZ8pKOc1g>wh(;oH^U?)r+%JK->k~HL;l(64HnQn4oi!xHzpoZykFmV z{EE1{s8X*Kwo59SIcve3N=<5SA;8snK)Q6sJfHLCI%X$pep(BJ(y=$vua9hk&l|{H zNp+)pBq*2y4UbhGPOIw;S|e)+po`@EGg6e%S&si7ng1x>c<6tsC{vc_hu9%*kMyH; zXcfhm_75RZKbsO!K_Wyy3u0D+;?ou_M_|-GX_+9b=;dvr-0ydJC-Eb42nIJl=eSS6 zvwsqQjM@!I*p-K$nF;_14-GctDm(O`7BOl;#jKGUv+xp?ofXEnsws4$mHDD`n zb*OQVVg!rXd`#f}Sud4l5~pNohpg4eJhh-ZcQK?HcS6ib4KeaV6dD0~X#b?Z*HoU5 zJDJ)73S=uOp9kC%60}pJ)~_UyYe_EmHaZShV<@KMe?$DeH2bO-^t_Q%Q&S&x|Fe$q zWpqKRIwHl~S-MIG%{K>Nt2TnvD|_B}A9cDPJ4`)x{g#4C0rOiK>jE(=#~wacuQ9FQ zAHn)#gUWGPLB{glNK7}70H)u_jncR^=`;)nw1Xn+&-mF36{5nD{3Lu^%sZkdONnW% z_|0-vf6u-DMyoeoK-sBQ#`%n=_Q_#D@7{Lx^}+JWfm@Uz9Lo!KODAP$rr@i3liTn0 z6T^EKzCYaV00ufmz@EMwUYyx}?8AsAL~VOpSLfecOgZ&pOA|0ZuSUdwh1NGPxCKHL zO!`3`DOny;P|yp#&AgjxSQB*YNwEcPq8Dv4jKqZ{A5j83{S+WhFvGTjPfC-dmUd*F zi@J$>=3mdEyi{m!ML6kT2A<}X^cK)Gt}{W2S7_eYCY1kDOk=Rr;zCfHIT6ieRw6%z z0*&wh0BRXUB4blB(*i8#ydgcEBLa!sA$f!(59sJ;%2ct2vV_V&mTX>xLl@EJV8ci(?|cu(r2a z@Rx{y+3U%|FBi8_r+f40v&={LzV~AG&YsImp~R1kpiq(#n#?4Bib_6%Q-{U6$}fNB zhU!(Aj&l7F>9uA*$kCl#N^mP`Gx|k$&GHLTyTc^)#)Su~@1x7&F(?1^#I{ni#Fxm# zNSVF7lDd&@l_I}m!cSraR47HV6>eDJShuT)U!-)g;%T>`415Rw8Sa{QP#kz@R0du? z3k1&e1p+J?zWvx0Y2gqvA)Uy!9seBGi22U1m`cJiOHZGL`sby>ktMiAjI<5UI1=+A z`X)$bIPW^q2KcQ-Jl0DdlSr^wg-2;G4^PxG3Iha3QoZr5b{5aMT+&3Q_VA8Rlq%n% z|K{pvSK5qNxz$Aj5Se00p%0NGB>Y_Tp=^AC?rFSnH$1%0r3^(L*JwO`XsMQ35Hh9; zNFdCXH;l?t&&`JsZ0;4JvQZo}Moj6qz*&m9Q(!&1y#dM^1=6ATzWQzUe!0Ea)7YH6 z{&X7gRIDKWVEYXLvzgZF#1!>;lS+Allb1`{NF$U7_F#U8Ja^3eeAH8VyogPPp6!7| zrO67-3UNSmnR~O}n~OVA^l-GMh-HzJ6T>?jcT0YfgEK~_QyI*51jP5L2 zGz$_F?b6XM3il<#TEr2$_-pOui17aE_Y1!FS^Hhq3!$!i@!vJ?Sn2KV?k+{hc>9I{ z|D(UX!R9#4RyhcDvCw*ISez@sUFc`HGRyPF8D*pDz5I0d5795?t1*MP*#Y`Q_3#1} z5JTkbHtqRb5dD86ObTx))4L)Q8qak$rGGi!ZTGEEIv#v?emx3p&`0v#go4!tS>PA= zdE2q~O)~U{O*qG#vKI%ytzP@nIQ+^D%d4nB&i1{< zZ_Di>kO-iXtx1au!~~SEtJkXtd%ftzr$foYW~4AGEQ?;Utt=*axN8trQhBwcQ3`@M z5H@H%Zur&*-3jW5vp&Xzvr8D1b4b&_(R^6kvQ7TPI-)I;yR|`-5p{`ogpn#NHeK4K z%9!n_HzJNTk#gL>Q>|uCcw8-wh^svI9`Y{LssBN97_YssC4K?UiX54OF%?-n1=*~$ z$;1iot}m_M#k$O4)8hKm2TVlki#@yo4Gh3^v}%lM)@N71!tjsE`gbAh{$)I0Dz|;G zvJo>2dp?fgMO!F?;q{5IMrhsR;m+y^%6kHz>6F-rD}L(6`ThQ?BE)VQqlAxH;&J}H==kVg`A+9D3h*_ z`fYBxIBJ|fQxK)pWM_P-snfBd)4bxblOCtLt6ytI zILVSje;KM7T@Er*Plb2#nbhdz;U0Q=T(;}NwSOi2K&lhIjl!eWDf^$5GYsw5$q6>y zag(N}$3uOb<7sJU*BQ(7_dVjb>ZG;0FoMtD;X4I?%S!^Ids^h}mMXp72E;y^u?)0d z$@kP03xy^Gx}$-gOSPbac9(cmWr;H5A+& zP3fYcd3e}Xh+;(mQ)yRId;)IJS+)dbYdD2-1{?t5C*Y*b&!exAd8qnH}f6Pd%dX3-lIQS8<4A&agpRnx zN`j`%B!Me|4d+o^jvjCu##qhegF)>Q*JwvqojiX``u=*LvHAGgM}N>!<-fFg+5lZS zgWKb!6jn|oeZP8wuxAQf_cezeX8j;JZZCwk*IV8U_x_KPM3@oC*=sG@Ux)1I%Kt#*J039G+96ZdL4HUO$L{iy`D=god3Ox$96$y%+BYh zVGF`vct5-B2Z*E6QQDjqZ6$FF{#mm(iZQr%!O98>3SW!YwGl6c4MeAmxQ@}1f}@5y-dQ6? 
zExy}oHAZQ0NQEwLGR8zYk4j0gEb>!GP#(G|L{yqT5Xmh*jF!`}Liu=mHx-&2k1%6G#)c#$>Vt;$JRYD$Nw!jAIH(wM)TdXM!{b5lyzqNxS@wi6ynOP*i zchqn(AOS@)ib;nh`)}nD2)OH&AwHs8#*;Sdy8Z^K z8*Nt;LF=p09a-Xi{lS|T{lyH#fa+0rpPE{t!R!l(MO(8U<(=lq<4^0QE-+762={lZ&_!y=Adc3Xk@aB3Qp-n48esLMd``3- zhwgg6ne!q*GlLh4=24pO=9P$)!Rpf(ACeMTo7{J{^?4ss!m11}jGRm1peU2q*Y}+< z)W@$4Xp7qchJy5Ng7*^|RNlrhGV8t?8 z^AfC%Cx{PRvlk`GR_{sz0E1Q%0R@SQI?%%8U+`wtX` zKdfSz_xV)8P|kBjor;NOQ=2=c$uz7Bo*ha#D$o$$jKt0cPtiKwJU)dG3$eJXMQu=@ zh0<9`5}8y`LFk+3C&q1?wtKYW(?Fp&kOfL!W`ah6&c7ajo|hRE=FJ(_DRfw~DT0jv z&5HH?E~!D@C9V)I^o2Ho#U1*@oBlK-(L6#K!1mJsmwlw~+cobSV*dM$pcFoLW?HHL z^cK4&$sJZ*7Yut_CgYO~L?zUPUN3g$4}^sFm%OSl?#bV-mzVY(&ytAPxX^O?3H}aa zUI*xBJ5OL_I3ETy?Z>-2{BCdD>=Kz=16;-S0THz#9lEulyD6@|_77jo_)aq%nNTKE z*f1K4tpBUTSuKrV(}?GGBJtteu76M@O55Z7hMW2+xz(kHH}#L(y5W!R;jo*1F&2uu zn@}^Y>2VSKTiHO$pcP){Hf;1_&)R+?PM)QE`eQ7Z;IowBbP;^1tV|T)Bq!U5#2z|19-f!b^vrOzpmOya7;N+BCY85VB+gj?h_4V<6UpPp3p2&l2sUsk`+4$ zz+*--)3u~U2^Uv4v9d~}K*eZFHLODJ!xh<*iyu)6Nv5*c@lnHCF{f(igG!9Rgs%f% zHcPt{-^Q)SxLDKAFaj@mvSq2kF)2UAQlsnBOuD_K5p}V8xPKe);>-@wim(M(moe#^ z(8P8>idCjs+UN+uK~UBCIJgpyt5XvS!&dQ zz6>*A#ximol=+r>Wc_F><9AIs$FYKs2Xkh<^HiAmz`jUj8y(@K+hJ!Yo*G^1bg^e) zqBSGhm)BO`tr?AHOjQIU`)&X7XVv@pkrj}QCk2X=0zZSJT8_h&_=^99Fcmdbo`LNf zqb*~H;RFUY%N3Yo+ib?SEAJ(03R*(&MZVdNenV7tGTI`z8+IQ;B~_{zPOuvQv+6Bx z*o7`xU<}6&J3+;U_}%+T?2c9v`IX>Jk&s@KYE-{_z!<(a5|mLh+fW~s>=mS||NF#$dz|N;@KCTe zbeLk2=slQ&T~!P_3oM&#A63%^^3Rf>XzdOB9G3s#O0W%+f$$vyPtZ1tgwWS^OlssP zjzfx2mVk$={q2L|?18{A9EztQN2t&vnV=lYftfpCz}gK31LJ`dbWmc-`%^4`sGixO zFGX7F%N%U&%j6mn;3`+7c=dm$R->+T#~PlbjLxklv_0?nkiCuy>86|I0T7I0ia~RSrq^m~Id6)2S$heY?+zt$J3>nx#XLB6@_j(P$5h6GTjw zTHqgE?d=Y|49~RHie|>OsiXVC;dFTIYkugxv|OsqR=edx*%5X!l$6Q*+Bg)^n-teI zUh&?$&i6c~tb$cBc_c_fji%C4gm8uIl>t2@$}K}N<`VC*Hau7$6m&XsO@syFvv$=Z zFIZdmC>Olx-S3On5g^_4GIVH{<#cNoK27=~K428CWZQv2={-tXb{H{hE(#7NWUfvJ zIHNqH4Gs@9b3{o8tar*4z+cxc!>zqf!!^CL01Q1a-CM#8F^k*pqfe|81l*WwgNO1( zsfr5u%-3lbgr06Fn=^`zY({O)czy&xQsXm>#Fi`wp|R3)^EL>g^L$ z#Eb4o2pV>2#6<7o)NM{bHrCvBI1efXKO=m<{wiC!o7>th6yXP+9w#SIiSAn2amI21 z1#*Kt*zeNnRkLvw*6{dYbX|8pc#)q`;IwA5$we3Rr(pi4{Q3`j$nc9nYk#$2jh1fk zKMJZ4d^PXo2&$c!wEyzmBEQppCO7xZU#3McuM7duIG3c=)t{x(4DZwCgdd`Y)UyeJ zMA}6wgDxcm>OntX*bf*+)2M}ILs|)-%6Li)4se73tCGrhM2HNi0Ja@S*;Y6$!%(E@ zLt}iKuzZ#vT5co#J}|}&n37jMyLo`4X%T2!E-63+9!m)nE2Ui{3PX~vmDDPSMdE}Z zUrnQ^#e|xc%1Sy$1US1NU-et7L@M)-=XX!AJtNr(XWLR}#cCo-jj+<2^r(FuPr%W% zn$W2MkZHy8BpP2z+sOuq!cNtLnHkgI1JXt2;f0jH1Qm_^)D#X*=zqM>y<1aN>s) z;J)SwN0Z_3wldY5JInjxLwt`D`@7OAUU1h0_4&my<0uwX^HXD-+Q04y^MIMFMwqw7KUu|ns6FuQi1+L9@|KRdXQ7{|Radfk}VRsv! 
zs9fH$2zlx7Y^bPn29-);3qxVq6k?R599CJmI0Rk_nuUn3PDv1(4x2&lCsUJA>dWO9 ze-k;Qkj0@*RrOx}BX3*4BT(%D8POm;BVFt`mq?5!Vlrn)bSctE>)UiFo-)>B3jFR# z!lT5L(vgAUiS?qweRZj0c(!$xt)6*ohZ6Vjj$!lv?fN zIMh!o2FmHjsmPjX&jRM6fc7hJ&GFcvhBE$oiqv>uP%(mDCn_}L5#QTHQW^BLabp$) z%UPNq9X5EDcqHJL9qx~HjWKrNq-Hu{g+802H&CWXU9U)y4<4{C{y8|x<4~QHQ0^)B z&_J{~-hH=~{>I(;$S8@M>`qIPCQvKr5Q+T-1n_-=q?k%Zd`w1vqSr=e~c2f_nFXs z_KfU5O4q$2%zU|KQ0hn2ep6hc;IQ9k?|iDaP#5j1y7Zf-a$(#!1IDqLxObZR;ZbVy zuG3-pyj!xbpPLstzV@-9TUZ^4>%HnV9hW`%%{Yr6$;S9HHC?`FE3D`>U zLAtwPK)O>xQo3tsQ0b6vkd&@{dGGfh&)(1bfe)<3uvm-hJg?*U<$4rLRkw~+ZtRr3 zfSe5pld}q3!O_&=@ArWfNQ${?swrS=C(gu*ZXFsLJeC(subcotnJ$}o85G1SheD$O zzhL#%jP~vkD}+o82LOX2$JX9ZFN2t6)RJStY$~RvUulzS(>Mpv#nFw%vPk8}3g#si zD#oX<0RAZmdw@+isZ8sv1eo8Z0DaRW*zI$4j0AkFDIt9=e6CuK&Ji=d&R^z|Nr%Rj zL`-?73Xeag7f*!4HCc9?Zkt(CB#ihyj#-pwS%oa#kg7uDDX9*Xd|FPBS-jFd*q(FD*yY-;%i&rdLjwJ_JLTL^lHg zd(PiV7z(q<_g~voRBN>f7Iy4XLF|4)$`@($hwCd$pCBeNG6v-BrQda!X^2?Zn4(oa zYzixbIZuYo`dMcCKLkWCO2;{K7vu5J3v#-6E^PI?X;lfv~t)MudE5E zAx%JPpQA5>*-G_iqYkte1qDS}RXZl85>xQ}e3Q7QP5i1ro6t56I--2OCcvUU^Woyt zs~<(T=UD&J0^o5R-H_Zmb;Y>RhU?2{)Q|cxExjyhJ!@zB961kbRnq!;)MabOQq^&# zOYS`Ms(CYRU8Ai&sgq*Q?B8m*Knn4^sw%~znoY~?jp%AQ$I+R=wY@&ZQwvLpDdugnQx6N&a&~D~RNEZIzS%C4^0yMT=^f~21}rd!WvA8 zaDBWEyK}JHzHxnmWNsrOCRmD>>es_FLyhSGy1x4_H@=-Tvu%ynV!4glWDHcmG%Qh>lj5JjaZX>}h zQNDS=k%>MhkjQ(-AEm06@Jjk;2RKSaa&y_|wJl18fg4RnM%n z3_c>M)FvssGHs?RH$)$gfL+R5}aO`iodr`W`be zCFe04^B1?+qJ%vF7%*(uv zw>KA#WzX^u#u1^0D#>|;*JUK*R=#x0AFe<3Y`V*ZnupO#*huh$uL$0buDN-f4lUn* zpZgKM0W~e!DcOv)@E1#MA!15FqSM2xU8z-pZrn_qk)xu{^?1p4Hf`FONg%K|5{|x= zQ3R2tJHO$(F?7FavDFud9_&O4Jdq1UC`9k8#dN9kAr7S#TLrj{Ejz29Xz_P zT%#;gas(X_{^9GbaXhPk>((iByH2Iy9~V_cIyS4I{G|_TpE2 z3n_f<1{(rJ;ahWg0b}$Op=azeUvphN{PH8o3iaRg8Jj-MO15j11crYv19)(s%~8>UwlXC-1d-x37(Lza!z6@PTDiOl{5^N35V?C?!#! zBg_X}s{qL{?vG}_DE%!Nlh}Rp%4zd+jSwcWE&f?4U~+))erSJTKo07a)wh5K5!zr9 zL`GfL<0ak$V~0uQoQ<14_A z(Y#ZUttTBj*FUFMit-ZfGTLHcNP_Uk{Y)Moa*0c@ZNYYCch@K!oMVx(6V$?*Fjxaa zlle!q$bB5Z6=xq^SodkhoE&q>vgw;Ipi4h==Hf=i35G@{dN&BL3&-aJ>0E+_qU7T@ z@4I>RjC$G}x_f^0YG~t6)Uc)V&ZeA!_>SnES;``Mslsm~u~I3lR9CjM$p05| zvkwT&QbLR~@VnBC)^$8}<9mI7YR9fVP_ypl*GdZf6@CGto>l}qmCcS76%745>+$u3 zG(QQe9+JEL>1!;2RHK!r=SA>LF%T-D)i+m`5!3}By zB+RJY@qjjt?maL8L=9^LCWijBLdZ%6ZLXTBX)N_r6Ie+lqM-Y|XC#^_C<<(b| z4I*Mew-t(|u~sSIx`~x;?3aUr4pEpwRVoX}d4>UL2c8IfFHjSY?&We{NW(dP%0PBKJ~{VmXAr zFa}-PEW1HRB+%lZlq>=|PC6Tv3Aaq~3g5OPc?RCe0G0J<323(M@!q9-Fjq)bB5%SL zY!Z-YL16>#;WU(?Z#6AcDT~Gk%xhjPrNpUPg=T4_1s)zseq$nc+R--Y*c)k6*3u1* zI!8C(F<;+)V+fOUpti&Eiwi1)ktD@j2XULbdX*7Mw9V1^ z)}qSBGhFA5jAz1$0R9x9Z~{ANMB%hF`9h^_$*Bl@qAJ({TJ_xJY(vd|?gac_Z{nMG z2&sl%Ow6)cCcnc>mt79Y|6-&f9`S~GNS}}bui}J{+_b4*RhnLk5qFY(Zlh(GN1OCG z8pc*>Z$a}`lIa^Xdc9sf+0}q=`9tde@}PQi!4vddZB5N|Fu~JPBe8p*F!*TRH#|Hv zXX!M%zy#?Fk)9X>-416XK`FVp@mfZ{8scB8QDDL|5JVKJ2pr6ZC_mqKEGhAW6o zq={FNH!UELs05ae7ZRIxEzkgZf}l>yqBMMQgpIi)$+n|Z|KoSK5!dcs-bD!YtGlAf zO!`~wrIWF&w&gytky`TbW(KqaB>M+6Nhln>olKEEJD91`i4BCol*G8E-~aE+Ze@higViadlw?ECNE z5|I`9vl9oSq6IoN5st}=;5dlT$CJEBXR4$=;PPEGEoQV3GSoY$N>*>=kt^Wb&J%3x z(Fsw0A2DQ^ybIQV$V4|JfX1~4WgviL?yA6C>c`i)rDz;8PCQn#?_1U>LR?PKxwS!w zs@j!Il2;?iT*tvyBfh)ZQxY)KICN@F8PLqlUIBQ4e{ zMI;=>AA^;1*c&D=5?iH;yLg3>!4T;{E}v({#%R$eZ(AHJu(=Ex%mD81JG}~@tDnH`WX_~D76C z+wB^J` zSA!FFh8n*|ud?_Ug~+}Oht2>VTWArxN}5Ss3Wj|dYtfL}des21gbZ;J9Frzig1uD= zWFd_C(Cb?Y8noL7;RUp9$cQ2kn&i1d=iM3cz7kaFJk$H~P09R)1kPq&NW+Q>U4c%4 zMNE;N$`R@5Tk(X0%PYBa^>TU@;R^RRw1X+^!)BoIBN#QXc!|H*Slg|XHRnp{ap?B& zW+xX$Ef>0c5o7=@0*lb6ZEkxVXS*c;V13O%hf5b(b}@~iij67n#@ZLx8I$@7v90_b 
z8}V6Nul1xYrjiH}6S3Sbqn~Yf^0hR9PKL)beJqc{iHTswvpM%TO$=zTHg}T@z@1b1Y)JnCYenN++&y$`E@`9Tm{(KvOUOlEdf7{Rfe-WZFUbeC4aFS_1uVqmy91dYhofZ%@p>oo?ai9X)SV>1GkU)`rI%6?8GPc`MPN?f(0K{N2D-mqN@^vRh^(0Ed!yqcK0_&CYWe z<-=|vq1kG`!}F;ow&ef$XtyIRz|m8yYWw&I8s6=We&K#2gKV0RIILbqV+-+&EC6j& zM+u|p6;U#LP{AqqDc%!C5J66sl?k|q#47nE>6oL;Iy!|b-3jFi@TaQMNN4sBgOCHg zghwQThjlBj%m~Z?JDO^1P|WMHS4GVL480IIyI$g4-_>BoOQ86XD zT6?LZT(Jd|-w>}Kj9Cjr3TcCC!34nMyWNsp4=#W_M#-pw#)d-h@aBGO?Bl$$spM6g zH8`|RFt6^REqrZHbkwM-Ozt=94-OBS2Ux?ecOisMHXo!og0sM0W z1FQf&i;*E{CtXW7v-v+yo;PY87y3sYJJW2nnJ2SflEAW zL%j?TGQ7FPO2nNEFeRIeJ}2L0&?<3L!6b7SeOF&5^?}Hzd>Ng8kH*vBY6d$R*rRfc1X*$+KRvn^#iTyBpd|NI zJ^bcyy7+anmgHYe#Uw}Nbf~X;!$s)dvE_Ye=(-on$Lz8bu@wLRTH*c?)c{iRpqC1T7_y+tIsIPQLC;3a1daG}-}sj4uSe|po6TKz&8AzD{x)mHw%nH9t5-;Cu zWn!qU`)U8J7ZV@K7O84Ie{zy~9fUwo4CLjKY$!OX4oI6Y{UnZaRQvM6MxRB39}B=S zH-dTs${Kxe>-o~iCHa|^sfO?RU!-SR_1H4Z!+K3QM=85Vv>D5+nNq}Y`_h^RqLfN9 z_NK@+yI8YQ#*gSer1bQ0st{qGC$gewO0rK#YE_a&xFQnie!HhONeNGIXBom zfzNrfs-PLmCI2=5E3Ou~FWQFb=4lTq73S#bQtKBxd@u*cpv$CC_{GFqCA?MFe*jHM zbolte;9|K{8Zh{U%g$Kf!beKrQEWJT*h*631a!^jB?D_4E|zPHqw(Yotgf4FDWVR! zh~89eXbPs>IeEw|?+XH(TLp{=b~kyno5vcxjwfh4MvNR{XZ^&LSY{-gfhM zm`gqJ6=qNKSp*m4>uLNiG?Y-J=z9nJxD}E}lP+B+cxHi$aP9FCL8NO+uZx_1#p?n< zxX1bG)N%o}Ci6ERtM47BP9nQm5$^mqYy7{hE#Dd%);qgIVsw(Q-X3BkHp$Eb$~}OveKYS zYN?cD`8g&Ncp$zSppN?KwpHT66oF`}989Hh7sV?jR}Qj8X{jbv>QKiceJPoks;wqd zTd}khJQIhY9ZM+xE%tpgM158w|Ksm&$;6l|^bD|!!ApQcXgmTSQhRf~pT+D`XpGKm z04m1oty^r9HR zRF~HLJUQt3j$?!L0n481>`=0cog+v_my}dcP%~NxY+3oeJgtLenLto_5?7Bd+&{nF3&hy`k-p- z=xmm!y2qW+ZojYkEO_e{<+$}861S1M@FDOe;erGDAy@YH-sN@A2GIxrIe+<;$45na;~uJ-+K@7aT&&Ebqm@=G%jX!k7SicX!;qeK+GS+G9zrYq;_` zjv&tv`p}oo_owxy%bu%b9v(o@=p)psHowNy{S^hlMW@~wr0VfQ&+BUet0PFW+Z~l8 zGU~6Q|Kia8HO?IA{9`M1`YuSGd(VyEr>N_MJ0idm{-KPx(W-+F$eHFOfAOc;3v%6& zS<~rR$hr%_ueBaKY<(uytzo$0dCkqlrP-fIZ^C0Z+D{Q^N(UlE2hjV(M~M@A_|niD zZH$AZA%B&WG+3eJQ3jxBFviOgAr)$7S3PoNp% zB}yvbO`tKm2uudKjS^9%GmE9CXF)e^k5|PFr5RDKd{;y#Zv9upnp4MWt$ID!BSat| z+cRGQu)alZkvtaY`1_28%n(FV&D16$_AY!y|1}3$7kf=ZLIF8(%niR5Y;?=^oJf=8 z80|7?{>U;7z=@)x7Q>{9*Trn$_Tu$|#8rV0Oy$I0yrI-EjSHR)5CRZ{nvWtih`We> zD}2z{DaV6EmzjX}(v)G3M10ZoZex};L%g6{R+Q*0B4pmEPsw*)K99!zGko505gwrR zBb2%`IWMB!EfSy?Vyi_k_uZ>_yrq%AOPWi_49h$*>$S^s@r1LCiJ8KdaTJN#K(jx( z*;ZC-F=_q0M%)K8q(xr?>DTeN{M7*6utDl?2#_LJpr4f9=a7fT{9;EwiXrmMP#~vQ zVP|!XHH?4(V7+1>gM|ZA%vl~Hq3p9+C3?`JxSz%W?T9`~OhGEnqqXKk=@(hdzj$c` z&L3&;4M(eMp)Q5C^+Vx*9qR*MC*K5IPAI5iWLS+;mS0t#*!92i9v4;qz&TrO_$J72 z_fS;>bIM&5Ek+PQI2Zfn%a?2MPEJlWONh@gR7`!pKLuNdLLd;}L_7@37y*cWGJg0& zaWn}xmY9?tBD=sL^|Djn8Lib>>rY&T0M&VTDRwb$DJq-URm6Hv^BE~Wq9-DjQK!rC z(bJ>N-Ss^+Dk*E7#|Yuf#G0E+woFIB$WS=7@Yu1fsI)?KP4$WF$Z|<~KB`LPe;c0z za6TM63{?7m%ns>TTRU|8c(uMn_wAPk^qDEFtR~+f3pg5Yp`wSPZ`r-PS2Tx1e4bcb za>OLX58|?9a~n|c!E9^;fD9(Cs*3%e7Ac^cqSaSue(lAyOH925HoZq}UCq?e%lbfi zXrL>HCfp~Fw)HSn6&NRO{ul}nKH;W1Ib^G|I(V4Q#|tO~&6z*pa+yTaOc_5KWj3kN zu^5hSF+@qVcMY|8N=I?^#PG<`pgbs6l7bWXuWFT2T)AY0QACTV(9Hmbl!+!|LHUR^ zcvy6uS=Z*}IHy?IWZ<742UcrVH&Z1qh~ZgDp~WM{ zZ%GS+89+i=CsUZjfCvIGjfKw7M2nXXhDFK!MXX(`;lM_J)G{~6F_&lw*dR>8aAdH} z+!ziCnX*)bqdCK(Lk2w0?H1~;DE)J%ZK1TqB|XVs{ftCr`J>Dy#O2ygxqBiM9k6Ew z&=C%scA_4{hhu40v~4x>81NGMD+%4n1bl^{uCIYUAePer?Rn8~-J)7J z5UCjFjU)Dx zeeHeY6^zWmco<6G47BNRn!#cy8WY|@j(}1K*!Y`O%uI!6N89k88Pwk(1jA|~C80}@ z@ZBD*r`5&9h5LwL-(@~~Dh9UjO&*=O6CEStW_d+bFOb;G!8_mQJ3TWq?A7(amB7vq zxSsU=A1~8*tf55?PiYWv*jN1)Rc889h0H56`#D?pSAl(4Z!rln->fAY{f?Drnpg0hOq!Z{TV(LB zWZZMxH2IScnS-AP409O>D3GZ7yNTQ^Ad%kdIrbgkg_Xn-?OF9y%Hz@QA)+K| z;CSYn%`xstaU*}KiOweC+nj7V2lLpG9#zn#G2wo}V1NdL#dOr(-wn%5?xjL9v zK(xS%B00^+;?3gvIX|umIuU&MIaX3<+NXgGi4tjz=5l&A)>%*}T4OD=d*GdC1e&V6 
zg=vr$0Gp~dgNY!&A`6I+^BqfkT1F&>3iM&(EKh=f=4ZxVzN(_!8;|Q?KkPm#j*~)2wKEsgnXSx5q6` ziyHXDmbg?Yb9&DtTMOmZ6fQL$6lZ-R;hZ}Rl$w=$7pY1)*H`p4q08x61Bve=+60@5tvk zNZ}rF;|L?MS_8Ttewm=X3pSmP^E*~r{kl?y^z1_1=Q8lgY;(Xbwgh`x?Esa=G_^do1 z^gQyr9CUb7!p{<`pl?#C8~U6L@ZDvZsce@aPc?(>AunTsa0bem-6`%K5r>)#mwW90 zr3Ki*)Ie6%Eq9uWX65ZkiPm=cMrRB-c%tB7cm0535?<@jS~R;t1&Nm769hiGA+9Hk zTE?nCJ;g+U(7L=|O>2seosC4RMM-JUP3hnFZ1An;8aU zH&mhYXBL`3v?an!(KWud;f}9pCWfYQ!9tih8K&m-cR z;;J}iT%_AfQEs3Wa1G=u%MWtKeKUQcsj?wk&i$x;t>o^ZsB<+akWXT&dcNV~W0_s5 zlBO(=0YM(7;MvJ||Vuf4$%t7R(CXx89y$7EhETRgTD4z8$ud?AK>P$9#Zv&W^sBfrAkRuh)vyRc z&ZwC2lHnEv0#Sp(A13d^YRTOH7~GmjVdyi5{e}h4l=2ufvQ?qC4A~S;{kZeZPPCm*ceaiqT85tV z?9FRIzf<~v6a+u3hg=qc=ZA~4ktmDitz{BzkBi7Y|DM)fp&&T?UMPcpbd54uM)dI# zZ4oD>yoLf97XOnU=*e4nW6&VDQ+SZl$&l6>AEV0y#6j5c`r8)Z_&6Oq3YYvz==$~F zkmi3x$^cmj3j9sjr|7rc1l|WzdEJ=oPn~s}T=-9$d)5A<0Bu8xS{glFYO0nfy}0*( ze>J=B-c z+sSO$d?uZcR-=>6CV=w5Kzs~k$Ru`9q`@to947xQV0q&13qiXQOl;yyo4%@6dJY{R zVhfqveQ%>lYVU7{dmdEx zbqMMEL=zd9r8|LQnt&ekw=nSNnq+HV1h0~Zts}2~X4)A`4@Slyj1aG-h2;@n-LF1V zKKLtmFX5^;`AO>OksY^RD29ZFu7{D?i#j^4UAM>ON(M9p0*QIhWkL62{lvaBO+HeX zyTliXXu|brHKosX)w&I`HoQ!uXWmHul=%4EM+ulY28xPtMPaol$unvG_e-mefaQ6_ zOD9zhe)c0)QSehWWT>#LOs1T-z8>+sDQ^AU8sO9)G0?d0qn$NJMu;V56nXfz$ zKF`?k0l@*v8cT7hIh{x@alc%cEi?7JIB(b9DjRulu%e~*9$x-F+vO*Jb2C|YeKucS z3dmRcpgQhm&JRt)Gks1StqlJ{TmQ$26AACWiZ-c!BBcMl8fC&gRkGP*R8D;5vUb=3 z2wm{!r!}LZS8~D)=VmYY&Fl8QlR2sHjQxm_y6((IA|)kt{2@VmRH;~a48;3`14BBTN&j4HN zR?-)J|GU7;DR#+;m%(?bg-~iTsJGYlKl4D_ub>e;nqp}+Ozfb4iNu!6785H*# z7x-4vl{mHEUM@9njE$~2c_w6K1eLD%qCjTB{31CEhF3CNZ#-4LY?7uCN(z%3tN{N1 z@~uZ>1md}IaqcK*Ama+K9nzt(Po}T|QB`UvAwAe+m8vAewoH`}+{zIR#&fM?h}cBz z3E^b=$()x|;*b*v4t21oENQEhqoWqVED2G@v(J~)Vt1G^w;uqu0uiwVPrX&um6Wh; zY;E5~_L$ZJ(YBR3^^)Rf3O-l=P(;fylQgA{j?P<*4&iGl zyu7vRZp7=&xJ>_OvYl+?n~t$u4Xyw0`^4PmA&I^1!Z=Y+4Hhhc9m8+#fBJr( znO=6;_a;XM`g5$}qZ~ri7>&ZKqQXs-S}_fN3Dqnh_`ehXZ*ltn7zIC}{C%xWcsz1} z8VRw#zQgbP1asQs{@lGsy>@Z$G*ZzeVJ$VFN%opV!DFvx2}0}k`~zOh{kHyvtDBpQ zFWvK+)A`nL^FmMomNF?YSGc_$7jV42JZw z8dM^s{qJ9e_hRklbtl`7LuH51-Fcby8=%?)e^{4N{(wiMijQSw0!hu{BvN=4Y+l^9 z2WZdGmubq+ni9}niw~FeOwlkIS7Km7!laQ+|GFW?`G>Y$UiE?)?$`A75%J5@1SOeb zu2noXmMd9`{z$4sdmBR+P^74TM41v+U*$F%VyIg zb;)LJOLGqNm|{-ra5cmpV$;RE9I77FsSjb>E(KOSRh{8WfT)bTX>|C$m46)p)hlk3 zWTs0Wl2EA{(TFy6=QoY{G5_{M!O>Tk8i%Q7WW88F3q$uA>kWG)B{;xEj+s$A=FU=%QBWH(K>1wEk4$b}7A>N%xtA ziBWRMyVt#BaxVO}L+X&dzEVq@>;&C-R)mdGaD*d3rkD zHUpl4e2z1vlXx%dHg4F7t%%FWH133ix28{%2&5eXQsDc{&Q%V&hTUxl+)bk(7#&t@V?O@M&1RS@|Z%^9m4FXIAPhhcRjP+KN zv(g>Cy&#?zrclHwGKcp3AE9K2f3{tH`snu~XyfLSU;X|*R|f-N?f56nr(7-^^YCX-rT#_7-u? zDt#s(8?^SXa4k_use^cn%vbj8ThqtG}N#;C5zJp=TOF^1z7<&-@`$>>2@Pe1 z=Q2?-{o$gB=tI@&vel#wJU_gO)o6rm_Eb-W4tJNaaqrYl_sr|#X>S+#)wW+{X7;Hb z9w#y#UYcF4LhbI+;gdr4XBn%WCknnWNcCKj*U7efeCP28UU$FZqq7UW{>{r#J%=ui zw&T_cpNr#dF3Zx!XZN48RT?~`smnP;(#=Oys`J;TIyH{_mbYz+w)aiDI>?m>n(!3? 
z8r{ss1U2Z||M|9Nlj*w+Cr}wdmF%zN3;Ft;>Jz`3BAL@hTxnW%E%5U-!|3Yanj`R& zl+T5qj^Cc5V1(M$wZ4@^tkss!uNhe+;QBHt`iPfEGwWdKQH8YZN7714v%v@V^W1}( z-O({G*H0_ic_u1il|b1MgRz8FOLU!r1}pLeQ4%4=du{heInTam4LURDtx?bQLJj+<q6j$KN(y_UF?cK9<$AcX0L0Qzt9{-qqYXCIc?FYlk8ALk$a`%SYaP$Q5k4P%NoBX)X9M#pW2h3;pYPeN==Qx8`Kjk>~Pr3>DetnDvPQI@m z=f__xoWO2rK&$^*QRs4d9B=7E!O3gnJ?9{k<_-$qKR=dC$r_@C-{&X%733eZOaA^~ zoVE~u=#4d&&MmdhoQ3jtA|6?C=<3ipiBwpSW()k%ZBpHNSH9BL?GRwH%qr|3vZM?hIJ`fu8T>(*}@K~+XvOBUrY>Bd3a8cBe&_Yb^ff(f%JN z-B=C`ob!)QBNEO_P0tBspSdk|Cup}ugxC`l`O7$B?f$tU0$*yZoyR*QyYKjCbsn9d z{sMcuuiCxOtld4yL(g)8k*FG~um={fwjBS+H2>1oHO)xM^9gsCDKGV4UN%+>=@SA% zF9Bvm*o(e^x75OC((Xo9ZpH{#I+Swo`od=;0ismM(V{8Bb~l;^IagzgI)R*`6amGi zN`WyiuyBK@nC^NcqB%msknUN!L-hy;19IwqEpXBBNoNo3Vc(!>mF}-EpxY9(Vequ# zHF0D$!6iqaBPk=y9_$_#vzwin>(f#Nj<$NYeYq$4PQ0t~pg2w64LUZ)f3HSx1^!iMDPQ3XV}`k(dE%D0Gl=?NZ$D2I7e`yfU#Tqm4VXHNj0leO0ycw= zgZ9fhT3Xte0bQsx%qmLBr_Oz%YCjvD_%#ccRuEn?eBjC=B{j+JW|qbpdxA+1yqB=? z(wxHP%oaQFsSdFgD5lkUWz2kbXi4HbvPyJ@?Dj6K}eDc3F%(7G8J#Bb554) zbYz{WPOmjI(I4E_0pkIDS&$mnkoCgIUHKw-OjS`Jcfd5B?%AS6@PGs22s%EaR4!CECJ zJz7|@Sba6w;NhZ?=N{F-;n~P9_tlP+3r#)WWmfGP(ak*urJTz54zmo-_YNjYO`*V`rZY3;*VG%FKik{Ik7y-n42R`7@=m%6!udnI)l7!uIE{?g>= zjb~$RXE3r*@1X#xot2Hr$#Ig}HYS&EbB&d70gNJW{;{#KwvVUsTg15+D_Nr~{*>qF zJ_A5QoP(d^-Ih@G!|ASvre+FUgj1GQ0JGuvJ;=DB(S^i+(v-?F=!0UyIKLd@5iIvj zOte`#jy}WkEcsXoyBP^i*22jS_nqaKqBPVDW`XGr$r@Z{$Bm!M_9|9*Q*FCa?Zhk> z=yB?!zx16r5orE#qf{FNoP>*2A4b~FCVA8=%+U??9`PkMKZq5Sdjlh1ns|W|+C}T# zVEgW8C$M@WDVN%*{3>F3aUKt2cM8HU&9`NqzkW^=e~uT6TX zh`+bYM`2{$mtl6D>~?bhOc|m@uNJ}a3SNpMq$#8Ow2H~_5?2`NZ5+C#aBwe-`tgyk z`jeD~$gjoRzS3yo&e6eTA^KYiIkK) zA<9bsr3pML{NtKq7(UUDgsDCkkY%4r^t|XI3SS0kMsbx5f7M6iNVu<#gJN%C{T}eY z1M(cP5z`9b7J=WxtSk{iheLU+-mR-Y2N>4lRW4{SESM5A`qjOni{h)ptMJV>^b5$+ zQ58S3r|J<(rh+SzPli)X-8*gg`32vzsgrzWpG*Uk#sPw$DhKqIbTji11<>%p3 zmK7`;;wP_h?qy+jEZ9BBvlP=>{QxkuYcMDx@$bjI&gOTZ%}NXDBMI zt~d~F`xAOk%t>ji&AF3mh9#m~&Rp(lB>dci0nPsWRls+ZN5@2X83uRsontQhtG%UY zSo28jZZBTqA~CzAXDjRoe$(Faif2#U? 
zqC5O3kRV;Z-cB__4|uhyqXplAW8eh@?Aia(b`Qa@wXxYMh-QZ>3EzsB-j|9z$^^Dj z6zgJyjc4%6O$c4^98WL8$I7?wLD%eS@cKXp5JW7 zS9sH|(b@euN_UoOFrF`|gr&4yWxKB=n#J);zNJ2f5MW#D*Q+X;Fc>Cwjgr8zQ1lF~ zrfCjxRPhsaM{br~WV&3RH@_U(!i^^i=UlFBKX|d@vn}*J!E?Lg7XAouSqTi1@80eB zaf!S!PE~V4jN@O2bZb2c&y#ms8o$2K)zmBzt;KhAF|-P3y7cX=n!LFfg>xH!=c<^- z&V4r}>c-^$4Md(AwXY79f6lQXpZ(s~TvfK2fh$e>R*mYsd^asZQRq10blONPI~&om-J8| zW87-Ri)itueF1Ld08QP)gw0Lyvfhva zIs{inO1Gij&`Rb;Ra#J_sy?w!`AOi23O!Lg~xig^Be?!-~zAyMUQPWL=5N;U{@)o99tsY0<=7o*h2Xfs3bk zX{8Ce1AGvHCx%6{zcHz7Uk}y??D2>SbL;|mIEsYXp>CYmK^_UD?VIl)wALU}#7GoT zyXI1e-xaC7LnVQ?l|{Ru$l=LJaUPX1i8sPpY{PqQQC=S7j(HX{Y<}fz`l^l%(gpVu z-=@dd)!rAv_eb2ZDQ+p;ig=vxgG_36$e2sZ)bielePdqhgu#cH-5MnoBVivW_NPi& zj1eP#LTPc00=ljiZAssb5FH*?*#BxB208~VsynWr>wV$2_sGb|){I3vRmgZqA@Hv& zzI;glMhc1irk%Xtm+6~VuJ#PXvy@rKuf_clD-jSX9YpGgh}iYPEiEnR@yeNpIr9l; zmO1xBwDChgIm0T=_wcr<t(V+O;dm<&Xv1RJ!K zJgEeJZ6LVWq1pDJXp(4dKxOiOZ3mVRU!>M_;YR zfE23s0V|a=tB1TADDE zG>elG&d^0P2mT@gep{$IT}0n%O>}b$CB_$^$G?<-<~q-gZk@mT{l>^TXqh57AIMA{ zRw)$(#|u6|kAS4NE8OWY&|0U^mA}=MOSJ)P>yF-lfp{;|5F_`T*2f$bXO`M7qqqvfxE;fwB1MQ}zqeurkl z@8{j>p7VUCkz?j1x>-fjKX4VWE9~&GD~WH9qfaK~Ema$(Bzp;f+mVbBheG-Y;}m@n zJEjeyq=(S*t?wE}3>VC%8_;enWj~Y%csL2;#s?^pQ&DA<=MmO^?r8r7s)UE@{e^~F zm($7qb9ByNlQB}#95X;b{2o3CUId;>Mf9yW8$u;pti~5Pf;Fr)&BQj*b3uNAH#GS0 zt#IERsrrgzq~+n*vkELjcjbKRR0gM};xV+DkDa~D8^y$LD-6D+y}{BHHGrwG&3&e~ zINOGvOlZAyG)0u}Rl!V)FcUZ45YSB3r+@VAn6`)pUiHKMz#KpSX@WH0OO-F!caBFU zztNyh@*s_AxiiAuoP3O655vRy+~({3jU)9Tol0c+6JI?1F0K+9sDZjw+>Mfauap!A zHPX*#oln5Fxe9j^_A>616E~t?p(D7@0wWL4kVNrcT7Zx0vz)`9ikE{mVTa~EAL^<` zk97*C++Q5)^~)j{{-Bar8qE5Gk$!$j$HdB-eBE^;YV&w`0DCROBXvEJvjBu3AUx?N zeo@hONa)H1d8*nwG`Ft{N@jsi7vcP=+>E(vsEjG%jC^2zh{T(tLW}0r2=VY2@K^aP zxtn7s5igCwu$ta%?9J_E|ekx zn%c;|?RR4XjNfvMIF@5GTEP@C(u{jXv%ALs`F^!-Quw}7%Lp!tUM}TBh<-e!P?20J z`(a2pCUMy4>~Y&kr^4Jz;;q_2%B0;KZ$&eqa4d?}Sr5l$gik$*7{*9z7G?z$Z_J^v z#0Xnrwbf2ra$4@CVOMwCs@zooQ6zOA?pV#Jo!S|s@~>q7K;?7VqpN_>A@p$Ccb=5{ zVE(@3+xR4}(V_!p%`Q^k{9nGKo6t!P0{IkxoXOJY_CAC$Dz0;!68^=eClY3l@K)!J z4zlt5Z1g2ici2?i+Nv9I3ZNuXyfk}eOiI!qlQz4rmlxqC98FxRpUxr#zTxigDYn#| z(LOk!nw=FtzZD~srBJ%{kF?ayxF+*?YOQ|03g=p|&3*sVSetq+HTBsoj-2b$4WWME z*VRAWzp4}Ga&J4b{Cd=!mkg9Xy8ENf-$mK|qTGyZCO_M7FxT-N(50Z7_oiy`eYXAM zFS7Li(Dl}DQHSsL?*!f5jdUrYG$Y;JEg>b{Jt*CRbV^Ex#DJ7^Ntb|hm(q>r?dR<8 zxxVMxd;bA*@x$;ub3bdn*1GHeZ>{qGzAu_|*9xv{KB|!fSM{LzK+bM!%t-Q|~?@CKh6Fm+6Bg0!~5hkTL!#48uZX}96BzM3( z=ZvYod5xStcq!TRuyhX5w-_s*=i3*^Rsu}?N)*X)Q$y1lD9Dgx+*&ti&(Y-3Kp1;md+6-TG7`k6#PyRtWOm^$uSWDnrlPzWGYX{ zm!p4-Jue~p6hucXL8l5*1sa>Q8I0z{FFwJQtm_mFD7cdIDSdd#&58fMpw^E+v@K+> z(>(hn)Ea+*x+Uu`XgAAzQ2gN=Z1a`n0qmt7MN-@WXLunQj>Al}eq*;YOtVWH;rv-3 zpXj{!_tTR&hIL(knZ8oO#ie&z^~%^H%7DJZ-19x0LAqcw;BV z8ZIqAfZE&Nwjfc7-THMmE9njS!S9;MWWfm;LJhHCd2{#)-_?4(ZI~pQzjfTC&YaOrzQ}5&`bBR?ZpLdOj*q6t?ay6(K(f8q_$uM&IFo4ra`|bD za-yWXT=6rqO(RnHy6PohS=eQ5_Qy>txw_L%6R54&UCAw z(t-Yd43AyhK=D+?h)jCCUALkz$*#np`FR0Pe zf2O+1ZFYeAdzhjvI7y9NLIVCiUb{V<4vcuto6H}G&1L#RwKsY%0j9O+dJ&x^JO7%_ zsqhCejxZs!^2DQp6{{uH%XjIZOd5}g*WHgc#xBckh4jj8E-@C>a~pf0Tp+J)5)XDyw?}n zoUzd|vJ7g4FQ1*CzZ(AeJAyW7;(i~fQAl;DY4^&ZsFi(S1j81h2w6%1yg!t2Es7;K zePw#@@#+5eq5H?j&T8R}NX83aWb?NUj>wVVM*!;JPO+Yru{Wzf(?@QKhOCb6(M#O! 
z9L>9rza_3SU%+YA zIBKYZ7)gZ`R}3@pBy!va-OG&3O?+#=It4(-A?q|V=F5tMZ2&5tLvY)>*@i>R;6^Y$ zZ9)80*Ut(Vq@Jg#?8bea)p>*&HBO3M!d>c8AE2`@-o3Vpde5tKY9AKpQp8YQo7Y+D z@rQ^5E{*j@9a695^2NvG0n<<8=B6i?Rv^ST{;m67q6A9EA$`;)958HHVzv{kJlI;x58`{4qYSyV-XhF6jpft|z#k%aYoKRq^2%gLYm%9n~zSaR{q* zGL$u$d%>jjT9mCY2dcd3SC;0LvDjj+B8>JQ#a#<6MQPHE?r#0c_(%_7{5yF(PZGCx zaXgnXTizXhYCFE99B61nFf3O5#8_`h+twN81EN98IMzAfSzGO6;u~+BAdoXE1z4>F z`;C3&U}IL+UrDep^yY~MqLS#B98OUY(;e34l6q1EUDbG)q zNNSGq2N{M68{r8$#SG)){q+Ha)A4Yo9Pg=00+I~IWH$k#~mmVO6ai{!9?@bG9Irj5b1pfjt3)@tlX4ssB!lhW8r zyWOiL=f58r7w)>AUm>f?kKV6+z*S8MSp^4Sq3zAI2uwyGh}PLrtgJBLD0pakuQqI& zi(-iVlO&|T%~(V`GlVYtpR`mBc=)ZTZvQd8zebKM^947ymDh8h_{R2(<7Qv~f*LYu9 zusD%Jt+MxkZWECfplo#d(S01k3r-h%y4i%h``WP)Kqq$xWk6PTjAJCdCu5O>YqI$G z0s!)0ahhrC&>D3TOB3`}a6E;HA{; z5%m~A$?9^Ah}arp`}?Rd{_4oTg=*z@m*aDWrQ-i?2>-uBmL`U_!0h0{uByILe8qK} zd_3*)L$GD7dHOG*e-AMdjkTen@sVGvVwH}o57QM}i5>UOAA{Z%9vg+&O_!jXZFnBd zXsEWIVg20g3MAh7-d-kZSeB3dBHkc3|GfxII*BOG_?a%U5BhvXKFjt)FKPcYqdQR% zIuj#Bvs^4=U7(-|nBnsLGFAME=nptnYKGw$d#}oM)d80zlaSDiA)M=DO7dO3u7)Qs2btr9nP6W)jEKb zRGd}wwV!^92TsXt)TJz_bb8MpHl?Yq#qyit6%hkgNPcvC*wpI0(Ho{k5Y@2fE<4@) z{HoNO3Y{of&&BS#n2xrMNgh2d9CL$u2HnCgLrnr+(dYX0zu3e;2@^<(@2p(p{j?zJ zpXraUji}eZoMx$L6*Jdq-rWeUD2omt zChmH>PBHA~-Hm^>={IX$iF$cA(VwkX(vF&dy<=tQQ|)o>sfj8WZ9DF&lFWbRR&$g? zd{GDp;k-ipCf|8?WR{sd#{jzOo~M&TJVCAFc8Ah#6o;b=ReLaqF*8X3wav0~{at@s z2$^fu^dm_oy-pMbJvX7Fexc4Oif7qoYpYK?7S=_Iu_vVww|h6{OdlqTU^9Z~0xFBt zZOJv6>B)9RQl>@YWc}6zeybeHyqSfcQBg}b`Ef_^+(KFKrNC0^p`+TKt)gUA z`)OQqa}78V1|4JpWf1A6+n@_20HreIoG?+gRJCkR86zJuKxXlzOwK~P@#xa(_jox! zjSoj`P2i515UHbCt}A)4gqkmQV@JF1)5@c<#$IP2uc1u5_s3B+fjL!z)sZI_slAs! z)5V2$ydQw43@rO$*cpU5m7=vc?x<7scuBayDTH#?BOeBdB(m)J^_BI4~f*w6Jp z(TME-iEECZNPXe}M~6|Bz#jpebF4wcxFo)W`n;IeJk#|Hgv3KuK{bO{2Ce|oV$E*Y zBW?aV3V7EbNl?&Xg>nSbBFZ~Eu7@VJS7tl8Mb31LSHve~mFSjs=oFAokkBm}$dP4x zWSaYlyYZ|`6x6(6f-FDwiZ909wZPI06b{yC@(OM&ikL1NfxQ`}L_=Fk64DyGN6pC% zyfSESUeGIXse@`msP2IIW@}c@Ohu{$#fece*(Rc;W-_|5eJ9g_h`^JimSCK{`g=;O z4fbPG38)ut;QC}ZFZQt|A?Nt4jn_Z-)=i>;`Kh6$uC58;a@xk{*Tj!%+~z~_hpGn~ zYvAPxZK+WXxMvRe#}v;vcoS`C5tI)BoiD!$l9oAXUJWY8pV$V5tCHu1Y*)gYwnI!c z#kWWDd4){(Egf&08rUVtIT>R`-p$B3A>5$}j?oiW;8?uxbSd|jHF9zL}mIUEpUlKC!RcF@!;v~>F} z2stumBj#sxG=^vO_ja^=JKsu0`fQq&>Zy@cl@Lw_ddeh~B(B##*&MzEsbB?PI2%in4FiL%RWLnK(ag7=Q6qu-I-5cJ>{LFY{_L@H7z*(HrA1v38!$P}A z`KLe`_CAB`OuW*7!(;@7)<=Z#A>z<0i)@g}OnN0gMkn~qk2vn6X#L{&sC3CS*5wj-Jv=LNYKsq$yfS^_ zdXp8xx?GMmQ5*koo<^yK2!LK1e}k{;_DAK9ek86-IWR_EdnqPrlrxR->sB~dCkn-L zkb;`3IxwHF+We!m^qH8io!D?e3ao$KJSclvgYhVL7(rbG`n7Mec+u^~i%MCGM02(|cH(c#Y`er2ABJL&AFl!DgS%OBEncd6<;rLTVlMPnxPq39 zLb#}6R0pgLOcq4xqemR#P}vRgUNJ1#6%boI6btbu$>ZZ5{>kHHiqhTp6NfdIRUV`P z)!KM37whPL9ZERJ592uOX&-U_V59bXrCrCLPU3{=M2Cgy+vmkB8)gvGI9G0jXwTd6 z+^YDPSXxkgyQDTFw_PlCY`Cz`jD)eCE%`<#x}- z&JA8m9J&Q8Pu%`y-G%zp+jL*L%N(pMYm_+dw!{(NSXq=E1+Ue;KV(Wt&SAn`edG|j zMf_?y9enA09g*XammR$GbaoW8Iy@dPD5om4-SA^6#L@998(wtS_SXDZnlFde->je7 z14jhDr|A()B8+Adk5m@FKA+*WP=(8etRw^PwwXxWDnU_h3TS zM^5j(5j^qswGOh1+VZI{Ds8R^Qq2PCDHU2TzB2o?F%{-Op=!#sV&MsA*s15%C|xr$ zu}Fbv$FilyqQ6KH^u)%*nrZq%d`gi)komdVSlVq0g^m=DrY_Ve%_yg9!otueUebPD zTKYQ|14l?By9EdT9o|~(!kDI7RHb62qLjvH+>P=D${58Fs^UoMozDkhdjV$2gQ>R+ zuk8{%ySHO*RF-SLIA<}JGwMkMV`)#h18==@!QF@q|c{gKhnQ0iZ- z6vbO2I%oU!>^zvdDDR{7M@7}DA(|tF9M9Gj%e5%;Qw?^FPFhS%+yBZu7933Th^Lg} zR9qNd;lZhaFAn||c*lA?ba}zTWgXUF{K(=`dQ`0oortf;NdlTCd&=OrAMu~HI&ZU%M^`DWz1%o;+ zbWK%eU!I!dP0Og4>whb|+GBJlS@3sE{QliBiaN*~ax;R?dP=U&hKB@CimD!ol|{PY zJ&^(704h@;h4gSXYCiNB%s#<0&YmCf_vyOx>}%ema?SB_%ei5i&}SZTm)6B}LTq@? 
zZ$O0a6mDAogOm-ChAh9eIEnn$8#eGML>c7#5|wNx)K>X|8AnZ!WjzQb8%(C9ndFB9 z0iNDrhZJj=`M9+-aDoZ^&0=^mj_@8G@`^skau?`W{ALa6nEcYkwlLeW%OuKyb+?u9 z1DeOIF;$_K8dVwmW41XN{>Lumu2)@#aDHhmVA##VxL*TWW zi{ay4av&+Lz9YxI_AR6$MRmcR&{z zH=~|a34|m?zb^Ywo4ig0N6DLEa8~%46t0*D*UgkPW<4%^Vd2Brg7y_Y9`&C@Egn9U z+((}Fi==L)FT^u-*+Ac%#Br^$_^*FE@N4z(3ACs%$CyZ z`Io2zO}aMjx!|6cMO=I~@ZemnUX{1ve>1<;C}VLNx;dXIGZZB4a*mUoc*6MT#LnAx zePur5j@w_AxrX)#{7&n9C_XWu8hBS_1kSyF5TbO>1)8(oGrSF%whMl{uKt>~}Je9g;#_?GRJ@5g*9G(Un#dz!rQ)tNPJW zO+qys8HG(9X+e8$ESbUibE^C8bO|L6na}N3=E$K!suHk&Ih@Ft#pc(DOAi zI5~{6ixEVl#eCqRZw{l#@AlpQRx(78mfo*Ia2$zAHi3^v5U-IV%5*@4Jlb>ntKDRV z9#bB3q2i?mPY)DIi~<=}Xwi_FeeEFalBg}M$9hDp7)pH6ZHKYTf}xIKUS3lpC(Qlv zrtkDKoABzddN0lNIEc0pAadv1s?%Neaa3yw0bma=>c|Y3uRD!o$^jdC@hCIS~U1fUaS*KGe%yjqShq5yPG8`!Vc*3ME~3 zeHgF?wC?E+2qMeaCZ8qu@DbNrP{D1aUlWiI-HNs4SgU$`-fD5{+w@WjY}44}6XR}{ zw2*eWGdA@^v#erUB%kNkNcf%7uH=^kMj%QmYLcsC$fvzkl09hao|se54o|b zriA@m*F9dh|FsPO4Pqcs`K*O#nwV5|Vc>{o;K-={0U?motply|8P|~V@u-5~j@hn$ zJ9o;cOz~NQVw>-`JtPh%K7WwZ_#va|#`PzIaCl=O6{#oo8+jnv>2qscq(vKyg8T0p&(Fq(7q|D>wi40XtK4}?-#{Hm|74?-fTvk4hnVC-edgy=)t zKmiUovWpYE_c zF!W315{Qf)qXo9cy_c_)ty}FJ_p*g(7tqv`eWZ+g5G+Q8Htu*Iy^}em~dATbXFfwrk?!1BRr3&5-xn&vVWcL~ELxAxBGuRNx;$TTkyA={X z<^10M17y+Q4HOseKUwEFy=&VEw{I59`8y$nyU!P(GfOlSAKWB{{VU#6M0f3%^V#-V zj~dBxA=u~q%xx=|GCpPX$?x~EkFEsGon2?JG; z*Nry!{h#PKJl0PEkv)o;k3sF&G7z z=oTAAXt+|{I5Z#SJX_7V>rQ>4f~Ary%}f8y=D5w9q_ao4PE7Pl2lHpDFcQVe94m#Izmoas6pwGMWpD zK5K2~Wq8h7B{2C((7MHhCaiqI*}6^lt@O@*_5`|mc|vDtznL43C0#thb70HR%BL)z zZ?9Fwj8@KAm9}ziEXSj`Ji2JudkJFS zSJ+Oy61j6Vk-xd16}ERgwuz-Vqb z@Trnj*Bx+}X~gPE@YG=EP@0sTIUZab=G%u*tJ{f(Bj5Vsmc4_yV zEmkT^yM&Q={N^7bAaAie8%s${7Z2`wof#su1{Km>H(6%=7j>?g(> z6`X6lh;{fO$O{(Q3~lOg$Z>x;S-2Ab4Y!7*sjlHtx2XBN>;TAahwonzPp+%SfS7lP z+y<9*jo%cqx_AyJJ4yKUt@P>S4M#EP57+pQzH{;9sV-byl8P=}jkU}1_ii(Fr4@yY zAM3)9ece{b?*~=>dSB@72RnGWtyqr_{`qcVy}n_;KjG=zauoe{$rOrTD zZth)D6W*Vj?LJGIk0-t+FF(upm;G-7`%Bf3l+T7gf6JLKyp2o}z0Voh&a&Q|E~J=s zO&4`t5Tksh=$`_7c;bWz@#oQDYr4;@Mu6u@>ZfJI3R90FO5?ZNMRZ3YWWVc^DsI}_ z+uMD#kG2fustPMs7#sNn3g%z5t;*y?_uVH!#^*BB*&(@?}RH*yNB>Bd5MyCUT- zi8dlqTT_9(I`mxmBJZkjpoRJJ@2CLC@YDJ7%x%;~OGDfTEb7cB^5lIH&rzZHt%K9z z{tA5dyT4jBf-89KvV;vkaO*;anO~u54Tw1<(9A!XD2U+{ZEYh&JgMYri;Q}AucB8s z)o7Iv1RHKGvmqnDuMRH}pZl^bHpbESE?=v`x(V<*msrFT%owV_+=!Zny|d+8P2&)+ zQOh)bqu`gcan)KpVMf>TL}jw%=baoXhCg(@tB_V1kKzBPZ=AOb@3f^T*ok%scMJW6 z^<}PX*;(Z$7#-`QyT4NDktX`RrRGj`;`>U70*W-s?o}E@EmwGWodBh%@tH1jV4xV& zr`W-0pT_qVMEN9skTkIN`eJ`B7!5iHU1T^&>JB`rdzF{?jT@m7FMeg)av%RqAl~I# z%qxwQrJX?`&&X}r;6OiIit@w^5rRGQWs-wRtu4R z(b*7G9Y*T$T5@dWUw*6FlM^whw81p#92jq=ssgqKR2eQ$PfuamT!%w9TSy6gX0pB_ zJRJya0tHw~MA}{SQ5IBzcNdd3gsPG-y^?GvH`T7t@sL08dr+pb(61=v(6>7aqv^3y zse}Q%6RHjt%s*)n++b6(!LW|>C1)TIrrTm7&|Iw=!CrcD5%IEl#jm0kbu#V|leIq( zM;#>_b>Zz!Ho4GGDs`(9YPei>7eXRKidk?VFAULu@3|L>MhlcDdWO57JYxT<$+&je zF*P>xS9PgiV&S3_B$SB$N(Cb|r^K-%dQrBQg&vV9wi$|@llVDmeQUfewY3O1#-96@#;9@RgsPB0jkEFv%^^5e$s8t^Nv}wAH+pzo{pF6-~kWd1jyDi+!gi@JRHo)_cYs3QaqP> z_eP~b_spb)0IxI1b^muZQ*o|ES!89Rgc$o`f zP}rh=PG#&|xsk3DuTg@lYGA|bg1Yn@LQ^pmB1pD6xgmX&zGOYY>oCQFx-T9r@|=sP@9L^b=^ZBKNX2veYQYpH&B8m*_>M}Y*I z$nsELCtdH} z2PvS)JQ#rYv*$I=zMd0#{jotU5?1FxZb9a)v`wU8^s1OL<65sw_Za!jUMPY!Z|NNH znEJ3jgSid=&_CRRD_UfkS7((<)pk9;SQ-jc8w2wsSHO_*Wt344aYpHv4XszA z>~-al9>-HU)cT8>*1}}g{fhvmx9NoRN3wE(`&&_gOb$%_&7050ftR!Y3~nVaucnnH zL3S_$zSI@a4!qa#0)*?Bxu>xF-UYu9L>+riy0!=)OQoRXs5m4aI^91k61P#3YJ&Dv zBafBEzCz!hcj1FlRM=R$URyEVfztA^Gx0`tDt9)xavYFh?+FuKZ5@smusD;@f2jZ- z;B`qz&)jS-q4@eZgsR1d`D@VFpwjLo0v-Ga%EJ3NUt@+rB(NQdnSPJ6&hM%wBn2nK_yH#b zJ7(Ww+HE3{kjmp=Z*tkOw;>JC<6Z+h1E%YB1YSIF@eao+7l9CA8$9+jEaWw_v;i7<{ zVQM`J-7tqorzcmE7T{apghl;`8xVWzgs- 
zhZf_B>lF>%ZvPQ(gD>FN3ix@=e9@3_+vJ0jB|gKk_|dY_D4THQhcnKUi)t2p7mIb$ z)WqT8pYt_mzDaLQ;&<=%r)ln_;bBG3KrYVFv`%BM$cOc#x}_kOELWu9mP>(0cV@gl z!^1AlE!%V7URoYDvHfrPGkxU6ZxACaERpM999tE|4qde$sUwE9A|9*S5AAks02H(# zAEu)fM|HCtRq-ovE<0dsPis3{qg4N~5<*!Cvt)RdAolQuXqR}ObRr;R*g>aORo08F zfLpWw4f|GhqoIe1vjvWh|afM6RMx$t}aqFG1 zLjyft0vlDaOPNJHT)YGn4{D2a;g^x7<2L9OB=N(CA_CxT_E zOUMW-L~Sv<6ES4*1y_3#7?1}45vt{u38{Ul4x(&_ z3PmjV#g(MyWEyz}1ie`*vd9q%mIWHj;iWNZZ4}f)Ev9L5OZq_mh1;pugTT&^v?h&h zJ!4TfmqiW`2@m{@CxP_cUI|J;uR@h`ep+kRDY-`J5$=X&l-ltTM@sdIs@u;?y}@o7 z%wm6qS1so|A}?=fR+30-Uv{wwVu3CGSc|J`I>dZ|Evv2g?ozjGB`FaNKlcZj?`_2J z=(5yb602j=A-SNL7lGSd^6aWXp=3PyvEr6V3UNtD41C-QU7R3B7$<`F7&uB*IZ3(A zLUfg*IM`p_#6y)qxzC?~?anO6lAnY+FV0!1=6s>h^sH_06`*&F()qO%B=tv;w z!6%2sT;c7~SR+^($(HhkMQ=i`7>IpOrgsY<$V**)5tEL}>m{L>WU87OePr5zv4-?>FxHYnW{3%aqXyEU-;NiU|onzl}FDFO&kl^;YXEr(hVUSr6LPqA+?XpiI zO=)bEf3F=|MI*v(De(7Tbh+RDT(SC28tI7GwfMwbxb1-Df1%|6eP)7D>cV>q9`l4$ zb;A#*3Iq1mmaLn(3p1%3IqW(f-@Vb??etFxsPizsb)nRci#*m_hCc)k(7uBHVAhwM9*vw|w212GXL%ViA^_JeO1S z>YTnE#$suko|-%rMH<4$ibWG?HHk?aYpKIljW=TALzg>>129z2X^3f$0LO`fd=m?6 zP9SwLy8&ZMt}(jbgC4CP;3c+PeRiA&V$A6Q}o1eKBrLNFY2K6^qk?A13vVZ|m6rPry#W*-oNPUwBz znyj};DOmEJR>hUkvr4MGO63I{Oloeu$u2|D@{L4_A)y=eWpQ%~bmWQZl?uk_%~mWQ z2%_YQm^C?uq%{vJ^c3uZ6c+0$U$!bHorXT6*uc01gF%i0(v-woXU~@R1xj*!MClba zUJPg61Akw! zHfSuA$sd&46SwaFEKPejZVZ+`uD2ID&ez(?!Cg%;74x^_WOUJK5@BV0FGbV=Y0V_g zWvL*z#Gu2$#nRQ|HkY94blE-FKV*1(XrFFIQ+;#Js8p2D`3>vO^+%>j-l$fBdj5K4 zOZupkXF&wJFJOFcP4jFuN$ad9G4{8(G~n;vBH^hN<<+Ed#;5GxJ_fWm{`H*<)5Nbx z%D=Jpm~#<(DIjVzY)L%zYe@%445&lKbdqgU@x80{Xuff5cTA^Z>>cSF=4h+^z2aMSaLquoMEiUraA}IQHU{;v`zkV zI0>H_Rle7XuZ?H0Jx)WunZ*^H%V*~wt&TCaCOx~z+eP34ESVgD_wFTMy0>qiUr z0d+P?n{4ws|5`n|m5%r}QEbLcXwepoiE^gn#u`GDfCbS=8t?{HIUXC2w+@ZNq6vG- zFqSLi`y_E7-y!XpJmSH%Y1pt6P+-6YW21XTXcxUX)e6rBV&h>y5G+6(*y^=J19ayF za*=c|Gk&&ovG7PGc3}RuNo&iUvG!iEOW@V^%3sICE*ygz<)f8u;I-f?quTt6S#eC z<=b;97G-VEVWrB6(!-X`wH@Td_J+=`}-G3W2LaFCp6$+FQ1aM12P4B;^)ebT~z| zGD275&f#Do!Mu3ZNxN1Fq%8gRtoV6n&Cc=#f~`H*J-PZKt(r}uV#aj?y%AxWwC|wu z&T^`8M_+T?TY(`%8$Pysb>P`c)cUJTJZfjg!Gyt8Ptq)8QBbe*!ZWJh!A&)lB|UyG z_S*)*$Mk1C=xoCr%3vg?H0fo0SEzNEw4XUFb+vx^DdCWjdV%BO*G3|@}zH1ktH26GROy?<-q%vX*`D!KwPbT1Hw3? 
zbO%>?T9xjOBw+D>`%aTOMU}2rgM)n!tC}0c3MhaABD;Y7+BLc%*W&Cw5^0i=hdqlS z@7;azE5!19J}QR@j`{9AO@KBhg7m03sd|K5oO-1JS;f{GK>aG`+TDPmYNsBfvl7gW zEUDZ>iF)v{(rQgUh!kIA4Y-gn+9&CcK-AcqVl4&JUktqY9&#WUp+#nClw$jdW`+B} z$Ln|C_3(|tCX)n*86(b;kxJG}73w^=b>!eipgVp9j0+*C0WaqQ+rqI0*W0XNj zo?xwpvhke1w81tT>>+lbI^`MKF~Kg9ex?y=LjsdqprdFw6rFI!bq^rcbzLyw?>D8} zz%`BhD|3|`T}W`^=PDW+>=xcq?0h8?f*N3PaTrKxnX$*}Hv0Tl<|fuz^NXO}alc}^5Tc3d)#;Oeze zStKh5SQhqdY4Ldm;dM{R5$%<|fkB3>-V{z|zV%zIVt39i*UBFy7Y>VjCkE1;XO&)% ztTAzLvWwI(QvJ+nz4+y|;gjf_p!joY|MDuRny3S4&quu0Oc@*jTG^oK zZ^x|k9cC+)zdrdj4JW()SJF#-a1WG%&fXqYejS?<`byezJ%@}b?1@CyD^??L;#f5s z>@SXsIwEp{kjf3%GZZp!o>>(Kntz%aAh_asOlc;*U;pg-)|j>zNx6n(d*7|NJ?h9g z?pHV4->Zq<7YCeebN&qe{r&8_UfkJMXYSHJdFg+Sw)`WUY-uq7^`xl2PVtWD%J_Ha zOA0JXQ{X0X{9b!iXLQ6_3Ca4>6}qjp9dfFsSo5C7?R)w((|F#uD6Yj3Z!6?Tw@u7; zxs@Pedjd}a1?gE6bW$!1`FApHhV-c+71Z&9;`IDF?3=T^ywCM>k|W2UiDgl<3dS2# zORFaIS+=|-r-?1O@5JxjcTpyoU%sr1!xUYld?^r`Yv_pfdOr&q%O$Rjqu3<1)f_RA!RtKpJHN>A5z3 zMX$1on@P+57nQ|4s?5)S7IJ?UP6LTNx#^y-Maj*Du~^l&yjKwu+GT3_8gk@&zY90B z$#YpgOOt7;3@i++YEclx@$&Jiq1@d6^RWg77xUetO7%^Cxb#X&S#a;?A*AfRtU|Q= z%01h|QBm3CW$;@D?>tYvv>(}EB>>LKlo>n1ZNNwrUjVO6=c_h>t5;4&`@~%-AlW*$ zsWvh)cqsaW6CuuthcjtS3dRg$E9fWb7(Yl^0S+CB+=xeP$XP=brfN#`{oT?JD7BrKq7_X)0zi|(W*YE zJM!jmn#D7?sSi@$9fr-)nwrnb7X%linw>+Pt8%DDe9Cd@Mn6zSzJSg&x5P9;Mxk#l zv0XmdtF%$q;{TxmQ6O4<`EAUyoKA{-ay^)GL)MIVN<>|E3N)Y@&c+zCs}9(WuG z0^RMD3XC~cQAmpjMge$%*I$$1Pg2*7fA>nMIrqI@g!$*OCrg-lCv^8AN>fvbhLCMe z80Ss>ZoFPR&EcHg>UE@%nm(GwOSr{8TvxgIK6TbKTxt57QtFxOUW$_IFT06)Q09eG z=l;*$RRbULzmu%XE1xUA@%vaAM}+#nbeV-Y3}cT;8D8sQBH!x|*IQZrrv$39R?_CR zpL}9wPrtC67M>VrM6pe19&pVqh-GKB-ogt~73G|xAm*@FAE1v!Cz-?sbWFLDcJeP3 zCx|~iJ*`Zo+a{5JZ1JOU0lK+(a>Un0o8t(daiiZVT&453_33vwQPsd^fhhllaXfer zk5d0&pAHHM!RgS~J(GiR(kJTH> z9+afksw<_d0?%&}=@3@Twm?dLM2~8L9OU!!-aC)dj}U9)p{rxYMam!%U@|dI&99mO zg@yJK2+fqfG!74>Wh?st+j8wbdn;|1$WMMEdvEefouvqS8EJrmFP^?!hA5^@UZ4zU zS7oW|O_#N_b+z(LSs|&z98g3s&~pdP{7eZVTGNqf)1(*cl4vlZGATJ{yCh)?m8pu< z;lM*vZj_s&PReUNd#9ry`r=x zt4U4*@qTi=3r0)*N`Q@Ds(e!b2u16>=GU^lR#EA7bNxRoz%(vKNZkP2PJ}n*eGiK` za7A&&qRLOE5dG2$Bz{|wR$`TGIS5~f5r|8K{%X(h%t;&2 z9V}SjH4%ZgaIa`L(Bh{b-CySh%8>-WQ1iRmW19yOSxrIDj$9r$MZ>2j`Nl=3E2r`C zpdVcj2YA~Eanh2eXEZNzdUx6{rj&j_EJh=2JKL`ol>kYjV{%M`S(9)Lx0%ie7;O!} zgnfg7ZR2mYRwoF%SV1kEq5dSSTLhQ0*?@@yVJWc1SdTae1vB z!Lf*>4Cw`nLoK>-+bieS2zF{ zwEZedkl0oZuh0)Hc)gMI$!SIPKwc&*_G@yDv?ZGVZL1;x{*NQ4T2%8fx)Y1`RFxN& zN`J4=_ID?)?OwOnHc$64p%D|Wwo4K&J|>|0JZ4X1j<24kCwib9CX!|c)sK3Ka(7>j zB2B^#ypb2VVp5cFo%Qzjj_IbmzcQ^RvlvvvTg$-7V2yQIlrH4KSA6_D#Eu!UBWRum z=R-;Q8QT5jn)!z2wPR60X%FW)LyTu0C??~}57*C82M1v>q=D*)@*mM-q-IkU&*>fH zX_LG8h-#x1wL=;y$M0gab*mEnO&`7?rTJ|EkW-4%lWXzCQeghci$c^|3jYK#knJGF z3)$rn&m@8reUr$J)UV8jDkQ4R$1e_>;o;F6a zw#N)BCH35=>uHfW=ojoCsbsPYB&0m1l^+{>$2RPzHL)xxGbO)D(U;V;wTWW4r+oVK z#b|CP8jt@(zIT@z2x>c;;E0F=`92-c#05}{{Rzl+wg9ID$;aNGXSp+S&9v4$WDK)e z+?X)x8&mk)+>REgR4%u8-Z-c}JD@jHf91#g2gf1yKbFWRK31QXy>@%f0HG2ceQ|HQ6tMTK2)b`wcXWF&+O@qk~XSwWAAas~mUXpn+Gfo~@ zp3QggiCP0g#O8mN{%t>CG*DpqyA20#pSa={m;R~vJTO@NEpHyB)GEU2{}fPth3w{f zS1acV$82ZA3I>hl{<5Kg0LszflO~`AE8Ibz= z0n$Zp3tU++R>eNEq+0C*4N}o$%_8W~;(@Q$S_lo~` zwf73KVXXZ)xePgkcYMtG{1({YE>IHio5=eA(DjyKZ7|xhFa!%yAZTz2?zF|-DK5ob z3KZAk5=yZID=jT<#frO^;O_2LiWZkr?8|%ZKKtzNx##`_9teS~thHul&HO>P>`UAF zi&+)HNv^O=+&#@ikGcp6tP59v{iaNjC9;siPq9_mvS+ z;YzQrdR(n|51%uilwJ_^j;I43bIuAvTFOk*bp0!q4PeAtDi;#(IiC?y5J8#t^TA+! 
zTDYbgUrLRq{SHUO_K4x2tv3y&P#t^p6aE^b56Ff)tRs3>o}74oK=PYgU-zFqtRf!M zxdov;Ickp2o-rohL21JMxul|FaXhcV1ik{+TqOmQIIE3&087Qz#fq7>AlGbpVr{bv zao-A27%E}SjR$$YTYRq2v8P4*W3%|xeMc`1vH#{RdBbjh3{9YzJ_-U)h>3a{&Z!mX zspQzNYG2gwfrOrNa{H_d!-z;V5XlLY1d>hI%0PXBmt@ZyhwWknLk>!cekSbFF&ELT z*P4w@YJC6Ajskwyznb89dC1dx#F#1OlQ^xhr03dvP441WmCl#vA;pz=!5gT5+_LG{ zyw#VmbHN*O6w!n|x~$p=1#|>miKm(ih^^MHZzmfh1nY?Iep^ZXKjy6ei+{7F3CBOK zuKwFN&HJT+Z-Q=ZDfC5^c~*a$?wd5R;J-D|=(=|KR~h_Pg4T{nsuqr_#@T9?*j ze@CwD67)I#bFo}bO=h^WI)nyKgr@)8qG{%YT;9{*`mgctqPqoaU5PGM@?`HmXnNUiY$z*n+H>-c_o`y5HTIf^^a zk1>0mdx#Cwgi5%Y;O#zD`_j7W89Y&i9{<3jV3f1P0kTD#aB_}18>Y^6v`2B5XTOR1_^EQ&>qDJPWNakZ9qsVSECppcaJv`@`#e>8MV=g1mo!;t?AI>31J>-vtnF+)#kF1IC#`bMBx8)}-INh$6+X>3 z<9xb(e1%4SfQF=~+vQ6ZWmB8xb{vy(qb;=*yMsj*flV&akPmy|N!A*K3ot$+niAkM zki%98Mc|;}h@~0%J{o2C+v*xtQ3DW8{vByH^5`nNIu?Cig+@%vnUg#x&TH}wOwWN+ zF{NCZy7{BJhl_+u?M^Hec$Z0D5dkOEE#xLgH4a6EcU>~H1`ZK9z@$k8wqA%)f8L7h zwHF~kWwFQLD;A+& zRz64lTC|k#J1$v255PdY)rWf@aB~*aEVRZi>JL)MmyE!t;2aF*_q<3gEkwl^`Zbmb zD=Xu=Wx1Ov(dD;;vra{#fLHf`4!y{vGWX@3AYkbJ%YbG`d4;ui4rHSLtNL+zy#4)G zE!u5re?jjrP9F92D})^5XCp9|Hv=`h!6HRn2-_PPF)khC%Ae|%- zP}x<5*>i$zDeMOj`ZqKRnm%AANU$zE-dd9Fu^wgo$2oOI#3otroo3KdslX=yRsrJv z_5uUNhM)2Td0+Y2jn>}Q*7E+(S#)e{7t*wcZPN4mCqRD-pj^Yh);=&VR8#EtOH>v) zIXQl(IWD!-=NQQ>>K~O~>8U{9BaInYvq^+^V`1_AF7x-t3xRxI#ns3!=xsFn#-6TR z-ng#^-)tgHWE2`tElvGgDb2qIY<6n+$S3fwn7`lLm5ln3t%mq89~x0kv8d$T@x2+1 zbHiPP&+fRM?Je87(u2DW5BAzH6kUhBNI$|om@myt-y_MI=}l%Wtv6}Ds$K?zu{0~0%!_c#Oz$56AJ9w2nYzL^ z#AU9n!s7e~RrL+a!%4iZ_90s-3V&iw$umdaHK#GnC5#hpScx;_vtpl4+{S9yz}vs2 zlidvXl+x618IpcK<4|O{T28#Cpny&|$#A~)kRD?rYfK$gIb z@^9=wf#x*rBR1kMFXBu%AGF*6P;*L1%U<};HE_KmDmo|iXJvlYP7@n4g9^{U0^v^} z>b5IuP4DYnFUkIUbqkvz(wnVcfo`1T!5jwn6#5Js#5@ZN#{?DcHkeKSga%0JCY82XzI^uTt(G>TluS zfWFgY(Wj{|;0m=g0DWdt&IE4Qxhwe+C=*IOy(_@$$Doqr6W5ML}pVxGi`c#qpkPS(>@CCW(l ziUH`|<^eurYr!{Wa467-%-^-C5s*o zwF%CQk+0QdMrqaX92?aIzu)%ul6C&&FpCR0V_(rJp6O zwdUg^b@=smxZvsejHw3Fa0T>a;BCgw7l!7$h5(1wjAumE%Ja;!D&ALloFuRuqRjjT zBQFf|gS-kf>Wj~w^e<>T&d)_Q zn}{&was`y~!P#(mn=%vwE>TPVyhK8(TtR>_de+|D4ko%FW0eAV371or1DPw3IEpL2 zpp>*z==g)XNy#;)2-0wN(DqvG)w_!WU}|k}qXJcke{#;uJaQ zX&9a#r-o5wE`dc}<&+I+BmL%*FJHOCJYuF2yuOTdJ`s)Nm4ab}i{De8dENjoS3Nde z_`rC6wsYHTBV0W{OHcZk8_l4(#-5=O{Y^@bS(SlrM>jMJrkUg7Eqf*# z#D;9CrQH0;xLwnG0*UVP^9|t)o;SOg==j@4FKRyW*_MjY+DLt=ynRNec}Ez}LKX6O zyDN9QU;ax6hx$1N4h{~|wul#4ciL}#zo>vTa~hb_A!f&j_r@^gArHAfQ))xvW(#$1 zr_FyqH~&ZVCzn8*wP1&A9`PKfNYiVZv#nmR03#^b?c(0d@UzZ_JLlMb?i?cixADvGo~eIpry)pJiFGs6~>nn)BFD@8J4{P2G)(Y})RR zT^85I8x@_68#P%r1nYT@cK>duR9t{c5=jawQfF;x=(216I9OawyWi9C%srI&KiS6QEZRBtTW=RVJ zd?;+tbF9kp_4A>qGL`1Nj{< z0gTKP+XYewZF4n7(yge!Ui-)O`s>$~k%z%54+R3C3*a}(G`E2*UqLT9sy^oo4GKIk zegEiAnYjYAO0|qo-Hz;i1KNmlbuxR#0C-ca`Ir@NXga>_ZAV0(0Qy+QuojGAXXD`v zb!nA;7gH$gpXo)|_5AI~o>eTU+-N_A>&R0enu;=z8LwAh&ZHq06TQ6a$_ii)B6*3R zm!`_?5PNcCgxf)~G9I`RZTdeOn8ysewgAw0#569IU-luLlWWOyI~RfP`=ZHwVTiz) zG1``XN4q_ zD&Op8c;V+kw%eOtQ?1b9PK`yk_zffr>Ux;sEO2RY64iiRL^bD}drQdi7C zaHzX+Y+u`NPho5$^SQzX2~>F>w!mxkOG_Okh!0=Pqql~1+B){+m||7I=m_K48W4UX zP4GxN6H(Qac3gk~q>KmFBTp30=-17-(Cr`_E)r1)&hPh==##=ccu0CXx=iz@2-U27 zuULUCN;@jT9;i7f5`(Xb?hAAStzipzgZ{+u8N!xh)wwB%Fq9Lc4r_stll8GVV6!ox zLN)mJ7puV}VZ_TyTfa^w`lQ}B#9`mNw3)pKrxME%d1$-18yIo%}Le*-6$mTjIQWX zPXh_w7<=Zt$LEPtt+Nql2-Q@_W|$;}jrUOGX&n?3?y(J$uxi+OJ8g&TfBkH2CtoOs zfQTCFi8)zX%NH`}({qKt{cuf$TGv!Gx8>x`U_;Hc`^KZgoA6>X0Jmuewd@_v3W?28 zq-Wq~;dhgI@bNpxHjF8!cZ~>>ZDtg*6SS)L4h@W2&fvq3}g zC~f<=@WFZbZT6VLSPHdi@NWU(?IG>2;+y8Egu~RI^DNq($XlMT>C7ugOJ}h|#ru?r z))*6&rTM2*IB$6S2pP1fU^j7ER8}w70>jNC1{ZukF#tYX|IQ=5L9~whgLze=_*xSA z^FH3h+sn5XV+C&UhWv2nk+oP!*Uy(@jT1^+C5xbvO>|-=oMW_C0$0)(p$M&d6C)E9 
zs*!f;p%=qYV-xlX1`M#&NU=DqH$3p9oqV;0U1=I0y9kpe8D~P`2x$Y=%~{zNPOK>o zLeexeBX$Iq6)NoRd^qArbE++B(Iz)w;i}NRC9R#YClzff53oay;?6-0;CPttQ zVo?4~5m_mfb^2vjT!-#5uI1SWsv?HxSCVyxWlE0e-zz(E5V%h3U0D0szpsAUu&%{< zZ7Dr|Se$|U|JLjO>>9Oz(Q7hlDW^+ww-x?VMbp&t(7R8nP^$wncH~xmr?=6?>n8us z<cnBqC@7p4U5A$AOW7y$mXq^%1rB5R}P(TS+rJ5cb2w6K9bF)AzUMdMqczj5ZF zs&T;}>Q|tAGKtI~7JdV8ZT+YOVp1Jgi>c~oRFFGT|} z4n<+AWKfpS@=D=G=Jc&i&sKQ?MRB5mL)U28L`kj7Ey~Yhl&I&xwEFlRBhOGcHnY3} zId^=i_0M7x*vrbSJnr$NV|#CqO&Jpd7ukHP?Ti8tc-n7}9sGgx&|otq-L>z-7t`oF z*N*uYeLv7B8mRZvH>{l|8RG283s1t=e%UtHMl>!2!b%lT?$N zIdLeCj|k-ow{mN{!0>zebJDf&wF3Mu$hYn0TrVm&Kh$?<;+t5~k0XYNt2SJXBHhRM zih~8MfA)h~$qsx@Z|wGpd?C6@hqb&C#f*x}q_-rdgVkNmJ$if^TAJKvuu=*1>s$O6GDQ2YG1{1`gOVW4s-DgQ@$lu%g$Le<{M83-msCw7sYN zd;MIU^U`mQ6fPU16;7IJ!=s{7NqGeGwMJ8nJLx#{(u8_O(?P~n9>X}4#~l6K+f_8K zH-AhoLHzLDWaC&pE~>Egn89(6>V_LWWLJMNdy|z}tvEAoQ$43+?zr3xD`W4f~m26_doE zC1!Nw{UQHcrA2~ABfNXB+U9gZf&lobVKxmde-5Q|m;;5zN^1OY(bE?@{LB={eeQET zb}QoBUW+QY(32VO>Tv|={)pUL^7rI~Yw8_MeYW-#bKtIi8Tm8jP{7^0W>{wny+`=A zXvB3>XtQKoW* z85%kiy_3n7o1YBzE&OXnwsLz4_(?vy^_jJwQKWMlC#Y^V@8pHYE(F(Ov=`0ePM!7l z#aP>^IcP1&p69sZDa177b|)J4tRMS*9=}(gfYS0if(19{j=?`3k2K22ket`^GhM^O z7bnJg{gHqUlFA;Jbt(GveVPpTyk;oQ8m3QQf8YLhv;Pa1MHUP+)B;Kth0!1EH)bBU zB6*5w`q{HnGF^im8c-Fl4Ae4x)hu*y zmG^|T8vQms_ADViBZ?dtwn4sH3w}_FE+M7OgOEVEWYxUQkb4QCFKZvgvAaG*nVjAo zKeNGb+1Y>VSl+i_t&j=oz9(BhnZ|xf*6&#{#~9HQtst5`*$MN2iXDBP>cEo3CEAG9 z4ngk&vV+u|;s8G%_r2$~T$BC`Xo%9i6iuI;C4{>Ne+`o~Y7mFcfs8`?@Q$o7F5}ml3CHiN&4+P4`k#nJr@@qQjHA?F})%_&VEVp zporgtb|XcZMd|5`28whaQVIlmMKdoYB)w*rkSdr zQ(Lvz>Y7sdDc#i8e@=IQDt-lsBqbraZ|tBiS>^oiBHEDpjzHeZL_ebvq;Dz39hdN& zGq+rzW5zzc>0+1L<_~fZ*8qd#a>gW|v}f_h8!lju?!yuniK%8#ESAjJLtd&NKQhsg z+8>v5m43(d*^^S-0A5Y1_pW9VPd}Dw3p)%V8rPqQkRWiEz`W`Euf=?q$E2~9c5zZ8 zs}C2`MR>vJ*=-W|tl2MU(godQJ|SRmGKGi zB!R(&&lwrycWq83#bmkALX+;Xy7kJ1cwN$xxw4;X}{n`Fb zjHyk^T|JW`1>h7Wj+$ICjK?6N zF4nogMT>ZoI$XN5Mf%&<}&q4H|yNP;%Lg;uxadaD^G8(a#1WhFs?yEt)6+~g`8C0Qe%G>WIs zS+FG?)dBqg0#s_`M#8HbTm2DTzyb`12aJ@wLiBkKY5~4u%vzvNHE_+P{tfbtB0}<) zt$&<-r$5lJAnhYz8No-9*rNhA-`Aa*hY(R>6sCoS-ZYFjs;#hh({JY)o-+~=bleEf zRhmap+;l`Ru+l}nI1w?hmWlGaO1c|h^u98=Fcu*X_svK^F)nCVu@>>)-GcLbD$XR^ zcUgEV=Cg>IwGtLzkt?g2?ybDr|11$>H-VGWZ_k-A%jeWi=obCa%L${$FPkxL)s~C1 z-oh+f%PVmm&He)& zdNjiL$EWVor%#sd>soEdF)Esl_AmlkIC@=YAvF92+7W> z2R>7pcX)vzqf}p;-)NW+1j`0&KGYWaeq$9-{HK@Tkml@2l%L#`B`A$cdunxQH&M-= zPkuw`z}8!@!5!xb(%!lkLw zs#{YEgs^xWCzB}*q`PQT{*)1S*heh*(g!uC25I|8V<~7J@RUy{iO;yAUK!=3tiFaY z29Zq$-6r)lp)2sSk0E*jFbYKYT$ zg3PKZBujWKZ(ogW^D;<4a-i-XT5ml{5yoWW3vJxjTu=QjT^90zBt6oMXV9OeWeU{C%j^`ouk}jt1w96T9g`(@Q6X*2e%K- zueb`;9U=E~LfwLPN_=5kII-*GCqA0h8KCXr3jSoI1aZotiw05q3oenZjh)JiWJ6W5 z{z~UXlrPJ1A?EGqa2rtBvMVR)K8n#z|8ck=GP0n)d%H&jzRLdQscUGyH(B9WePB`It z+GAINNWP^TYX*xG5ffSQQJftv?EufNga`fy8A60=LorPtLzJ9^4%bT2H=(=ZJxJvu zibZsk+eZ^}y*oc9*2#LsIvwGGzKiwGxXvO_tUp+m7*h3_*Pgmn4I;&!YvZ7;zGMJvmM3%mj;;F_Gbi#|q z0+x8wac=EWk+|vcmmdmIIcl_b{r|^JV$>O z8b=}K|5Y;oUSB4@ht0n2-AIk<;X5JQFNg2mPFioaS= zbKoQ2y99g-^cbKJC@;5_Lbo`4X7?`|88X*R{WlV|=ArprtP zj-QH<&5t#ZA&Qv?h#L*cm^wNB*@0a_16H7lSB-q46U<|_KadaA$hf!dF0xT{L8`w!4Jns+AKk|v+S zCJs~++G3(7;)IdL7^H}Z_4tgQ3EVxCXekDH=kXjxoq#^#Ox&uD6jJfTv@o51a^Xz$ z(`KaxPGdku2;s6OgRX`{3^!qatFtM0HC~GyRT@?tFlt{yn~v|KHIXF=Ex{8=+dxUABx${7Ss+LgZHC9~?H zv@g%dr%Jq_KkdCUIdPWGEJbB#8JptFoNsCz6Mb5x6r?v?H#(-z?BB!e3p6_{PL@F5 z7tNnsjYf(S#wnU9dz0WduwE}qJWi>EzX1hZ2HSCe{`mO?25uu+J{jq-ej1I>|K6J) zVK|rmZnnz}!~h%qm)3-TBMZZ%xOVN&5eNO&AiUv`k%-D_%9D8G zCOWJAHmfIr`&+nC(;vd~jPI+su-5MrKRh#0x%tRcZXitC;>Gm$DqffaS}2v}rl^lm z(Xs9IDP?}xr0Cfpl=YbI1CLA;tE`tM-%(l8VAD;5u4S!AqJiQ2`@E%w>lW|pPKqL? 
zyyTOl{i!*e$m-26*d=K-OVMZ%HEzB;-nb}Q9IH5 zv)(%)8n0|Sdy8`%*Op(lD*@}K_jTSP*4E-u4&PM%ca6$_RUZOMvOo?N^gvuSiOyc2 zWrddEi$zLQ9lhlmFq^s#YSgg$^YD45fK;fDJ5}JG(7f|-_iUkY@8qr`fe<$H zK`82Trwx+I<*yZs>8aOBh@`PBT5D$U0yRuzA^3RYiA+E zZK|rT-Y+`$ZS}%HL`0s#QaiIN{~noytx5fYx7FHySgk*;zkgDsoTwLyd`A^Q%!tW2 zRJjalP3V{ccUB=aI$-U1qe?Rd-6QPCRsZ3^ukNc8Qc!cAFI0L0aSIvR@&2%ejYHbFj*M+s-~#BSo)qL01Dgrv~cmKHKD6TPsv z)1lIDe6~^Te*|{9!#l43pvB*@ViEr3U@z@Iq$@+LR8EW4c{@6CknxyCXT@kM!wI_D zQiXi?n)BaID9pFVT#WBW-DUs5gpoakMWFADg}ltnKG6Gr)z?!YAPR&Rh>7dj;k0bh z>t$fx!z$R0k`^#xfz_9I- zC-Q+Qg0E2ufJ`7YWaKh^@VjbYa!tBFC&mUgfj*l{Mgc_|xo1l_PmE>(~gef$z{8c_&LiK7Z2 z4Pd_MDl@9B^4`}Gzn{t9qnsr@X}fL+d+oivWwzPu<%Zp7_*&%qzP-E9p1m6Gt0VR#QPa_8qyX&c_Iem8RnC6|d&*s$BJ zvSi5J2VA~$+XPj|K!vb_bh^gPR0=6bN0s76)fZa2_o%_*p{$Ziu#LIC%jae5rs2`< zp&216Q|o|k;dO&`XVRPsJNp+6u6L`QfuY72e*zhrm4JT>-gWod+%bM%?l`L{<}s0dp7)UePxs;R6E-uzNRm>0mEGUia&!|#5NoE-2(s7 zrbTx5eTLf-%l-jXY%dO;SWr>1o&JB(s88A$kR^e8~!Sm&lw{?stmM#EifSPQL_T< zA+nd57?F%KpWcj;yT?rE)G^5BFt=oZ4)UBxx%0g#%Td%k3LQ{mfLw13YcVimpFiTK zO^TE)XtRL6u(#UKEE4CkipqePR9`T|rsoBnc<_wT;Cut3@E1%?pY1t`>;)bBl@&#k zKQD1sP3sZVqG;lKaOIa8RMcyQv9_4;&m~q7ZDT!L{-|m`IyWz<)u(O)HeLzbd~C1T zJ3bx)`M}i?LuOZNCoql}OsuqX$iT1qmVo8UzrHC|>1xFYZqT!|yWpBeA+Fin`Qr@5sKzUIO%U+va& zr#PeTxk_$|ek=U8)lDGPLm+h@hRk;E=3Ss`>p0d(wuNxNws|R;6ZW$6?XR- z8@JWsHxQ-12?qGF?Y*!xIsHc7Gr?x8@@?!e#w-1CrN|xvv`ko;1i8+I;jvVxbnkb^_w&nXkvE#C z-5K1se~vN_zaftq9-e7-oD+WxjIi$0wV9L+G)w4LRl0Ce%srj1W=DpxLAZ3(zi^mN!%@ zf_Wu}ykF`19C0GD z_jsI&rJbc`symG8<^BV=`S+jlztHox)?4DXx?>t6$P_O%?$}e#>eb_OFxfBSa5tWe zU_+rimVX9INS@Qdn z0x<~k)p8(ZeUq`)=4lTvQG+$V9pADsz3VHMa@6)w2@Jw~F_P`4d!)!S4Xig~ zoEZoOe{JHN+~*vcc&5>)%Lu*|g))n}g1C@Kwdy5ZCuh5Ul$KdllNjjIDx!ckAjaS) zWkgR?KaP?>84s1t2<<#umkN6QpLN$(2*silP0?P80w27}AcG-IjS5WV;PawkikZjS zTiiw}V_s=Hj>Q-E$TeNs!P_AJj!deso1CV3?|EO=exag0x2*n=HX4l%lj{Uqh2csz z={xy@)*;IB`reNp9OpHl^M07#+QW^c*eg5Xx*I&Qnp|1rxsqcgvkn*$)&vn<3sU1> zcx+ZUWw={1Kxq(Zy~FQHIdhuQypsL%6vRt=j-U3_MTe__AI^LZl>90MTx+I`;_#d* zP^``(!BBv|!Jd2H!<}QWt@@by4Ywkch_Tgj1N`mk_YUQHu zfz3xBsD`{DJ6f>4IR-s< zgM^rvFvS~1xNfnEco#9e55zJ7C z{E@wP*=3yv&wB4NZsra(5Hv#BK@%IKH#*w0VIsJK+xg8@(&XjAvr3f~k;It21172= zV|`bRp+D9+J-{r*%;Z@hUtv|JtgOXk6nuTRyO!tk<*SP?G{Yd0RYKgIAhUVX_~ z!+E$oqW3^r_J+q(@>!hz%ER8g;bl9kY+d``qu-o-P3zg6U%%LsSzQ-NZ6HN_2;|!^ ze68J@aI&75EI8nB^1NhvQ}QIxnfCsn?R3$=i;09 z!mn|tL+{Vr4@~{e(|4Kek}6(v$v$^V6T(J#wfTm6=;Wd4w=nCXFu27v3I~G9HV;eF zg_QB=Xxf38{N{-Y(eMft%s35cu;{DF)EF(D41N_vrn3nYq3#W1bhqXU)-TRF(r%O850AfIL#qsD<0<)QleAu^EpD&$tQiQn zeG1uIA*mq1$bbHVyLn=oRAv4Zbv2N1vINHC!2cIWA~K|Pvcl{&X#}?v&+k+v}V*c3w`@G2U!(c%+9C)&1pJJfri)0t@rJIyEzi z$dyB7B^7RG3acc(M#5YRF}s@c!lJI#24pg|`tub?ab1Q~cK&;c%sQa>=Y`>(P1XF# zoa8Lz-mQp@d?t!#Rj+E6p!XaJKm+%xN;(fsd{ z1AD|Yf${D}-$k(V$Az)*i_oSmzE-T@;?I%`2_*b$BBUn9ISs8NBsA%O&~$t%lYJ|9 zi+ZC_^t#LHUZBb;8!cXn2$K+Rcz9S&*No-YcpG*^%%hOMWQchZuXbX0e8PM4O1WJW zUP8psP&pAHS5c0!HRML`e{h*BzjFy#;@onr{^nIG=k@;AS)l@|!qrfg0G7zd{l%4V z$CcX&S?2|vmT{lpAb8WSEojqD+VTgS0!(D5LO-0(wQ%u{dSz_P(4) ztb>lW&FiORXA7FZpu>2<@Wtr@?)u3EylGwOM3Tm4TxFG$=!HZrb?KZNxiqi%%y!Di zMPB4dNS$#=$L``et@!o*&GUCh#Uvhr5cww7Ki9pj2ESkET#|-cWj{oNAc~d;GbPHZ zF$#v!LO&kvL$Q|Dv~M0~cF8)e>$h98aZeAN=qxu7P^DGc*WFk}(%CnP$5)8tZkBEM z!X}nb3}yMhX~NwDXYc>k0{s8mKeQtUc>z_Wlp>YjzX)4S0bXJEHR9mi;eISt`qy+| z8H$5P@f_W?ooYPv=6e`dL?X}(RKs5PrAyU`=rmeGDz6AK}t`C%B8 zb^JPF2nYv_57m&-_Yp!J4a!lJWtOB-6M=@G7dg=_8J)X6k_EZ#5 zN=UC6FO%O6lhU&e5u(PWSG|@PF-}I2%VO&mjuB@1(_O6ErRe<$XO*T`c_0QsFi(!f z>ia&-z?*d2>76i>d7{$p2l@XneraP~9X)z+?u{f?}C9rAFY#jR^ z&f2e1xbr0PNt=1eR%hporgls*{jWgwJY9}d+!W`uxhw!%_!F0}ewfUcdx=ocS5E{! 
zY&a4aY~azt7^WD^ktqP2`3o5W$E!J2lqn!eXMUh{Yr zq7^wekneo0u4Rr?Y)FAv*4H~d{}6oBB`Ik9yn4Es7`yP;GYkE(7?LX@`xZVua~w%V zp^)}=RL;Px3JZ@)f>OVfbh=Po4tYN~OV0ftud3_Zu<0EVVP~T=j(eRNqS?`UteO1? zSF~l*9(>_H7Sz&wWjQ%0S9<$@9Iv?tILkV@eaA_oir<%i`$K3NA|sU%iZq*2RGvF> zCKIY@wkT>;U*%-SQ+$t4H5D4w{*P|OGPNUnH;UeHRzSV!)UJ8Wzqen*EZf{yTP<`>*^#e1GSP$UABA$C&s zI>X2Dxplr+_xOC>{0l7OD8xekI8Wyh@7#7}xD~uML&9DnD`2Nc5F9^YWZQ`1JY3@J z<-P4xDdqKJ53%I+`)r;z;Z-Bvd;R>*K)T94$T-hp>6M28Uf=3=RJX&uS%L&s*{Wgq zMdg(|($VE*5AtNKQ<^G>t6q%lS}g?N`tAT^69b=6<;S z$iP(XMy%P>PImYEvPp*qq)ydH4uZM)IVpi|g~_q*|Fwxfh689Yy9>+xu(MrECK}|w zCU_#tI^f@By@7&r%35%xh^Ei=q>kVfRpXaTcxJbUq;@FRLjj1Cd9TM=Ptp^lgyqaMiZ(lKI z*LahWFlN7!zxT6DCX!hRQ4%thyqm6>&YkMnw;GoeEhs2BpYRK)ZS)qKWIvZ<@d(B{kT^| za6~ObpQV)Nq0t;Ku-1J7_N+j zByQ29H=JY)e0|5DZw#h#O))(B@Tu)XZ+o1ncZDY(r48U?`DjKB^@Jg!bRNc@&8u5u z@=cM1#-+N3p`q7VAjeT**Gf;$Lxq^F?hBic5eNYoU0N;#%A#KyY^{EzlN+;_gmxcXw;C;BJAF_gj0dv-dgs`@xTdkwM1b znfE>CWwRS$CRWqZh%49?=Z4ala5qm8*pz^0hiRs)Y6&L?gcX89P_{8(lO}<>wQ|mc zxZCh7XR;Hvo31CX6R+#HqW+%NZA_R+kV|17>pvN%X)8{hTh*9!QaVxlLV#8mRjSL`!b~z=Kwai>O`J8vpZ@3LdbE&>A7+IS{RYH7JpVD zY6JPtoG4t2NqoZAErcq$`Rg$=L0j7mIw;?l7D6y7&C7A%nR=BEjC*E56Th60W@R0a zcVCp_xWCkLF8TS`*e{z=u8t7=q&&Y-_G-EE-ms$-r@DCEqO%9Ry(fOygHTNT;`6!x8ir zviqXe9p=um5%){QAsD7RpRcz#kVNG4Otg+c+}HGK&52>oj-$`g$lVVXBrU(7um z9ei!~rlSz8VTBLx$0Es59OS$4!ivR4$BP1$%(y4tt&QE1eib9-gHylw|NL+Mo2PrjiEYA! zr*v=)yYE9en_m1aROitVdbLNB<|7QzaQ#Q$_?(3-+oEuKogc3{U9BO}K_u1>4=ySB z{SCQ;Js~+6jhmahRKH1mBV}}sBU0JUO#oB1`xex!^!q0fD(+B?y?iY%wS+F6bK-$X z6p2p6Dfu?-3w?qih8~#++msf9SE)TOqL4_U;|zb5Qfz&bu`H%HpdUhn2FxM6)}+Q^ zsw&#PR#epW|3b|G*|*rMWqZ_enSibjF+}EWjOjfJ2q_Z_!3O5UJ=U$pZn6AVUc)W4 zCcGd~l=iPmFOWZxEGU|fuW)bDcA246cnFtF?Aa0a7t$ik%#wjGgLGC{0{Q!dr@W$E z_g=Jgh6KKMO0evnO<-;cJg4j!5JfzQ|9kv5kcaV3gD2-De2e(lAL87a)+8kXPw5lcg8$$2=b@!G6-if+TlEhLnrJ61q zi!_IoIV`f?<6SBwU2OX3aV{3R&u$K7kPWJw`Xg3kq@E2@=`@%N3=IvP_GzDBFuk`b zo864H)VJd%E2tEf{w5q;MK16GL{74_oW?X>S%?aj_e{L`ZaK>OA;`vUklsyhUTE#Z z!2sWu{!I5lbtIZke6y>=nj)p|dM02I+(n;# z&1GYE-R1Q0JieC#!bOZ_gQ=t6DJq-9p}Xw|#=FbvbvVh=3+rJ~oG$B2?2;NRml&5m zW&7{#OIO&+W5aDa(o_m2I}7=sg?wFkd_(;i&)$9&|I$EE7hyK8Iske|cb_j=Qc``d zs1YEviCVvjlcyv$1}s)tZr$JB7Nr{#>q1Sf8ert_V*AUC#GuL0&oo8`|7mrJh&bwi zG+^0L^u^$dm-Y!1>7_f|Yf+Yl25eAGmw43?PaBSAdGx_4h=l(rGm!VrpK&RccLiZ( z`@%j)x;2>M*OgypVejZ3|DsEP3=dzs90)LlG<(*`aU9gr>hZrb3t2SugLvu5FdDJU zxNmDBzS(nOFj~(O8+know2$_cJ{UPH6F)vL#CVaB@zuNoYabt9a*$elDQL4Clv(ib zqt}Kk%RvQNnpcY3uqH%DRsYR&E0)D{3y0-s`enJa@j~f4A3?APBE;ovh`qwtc-_;} z%ld3MNbLF8U2H4k+2!n2s~AK?*Uo#z{e*~I#N`jce6G#j1RKJX9+C7Ve$`ezsB;|K ze`A6x@5}$y;7&)p$3vd~?VsPr*`q!=J_-Lh=CE7-(?I4IqMGR|FGanqP`C~sh zgYH>$b&*7?ZZEu6#m;{94)mj5-+4%Zm!(dG>&#vB{h50lqg_u9kw#UnK9r-2|8>5M z^K5mac%1r^s1cwI130M`SurmP?$X=ktb(|CnTi`1a_K->gS0}xh_sPv(!exPOW=V4 z4zjkFZCkJPkkPJlbFur(OVyKjgE2I(c(qUj0%*Q;zTwh>BDJ-q;})}JxG1)b_Tm5^ zIX8)98-*x_7dgzi;_jMZ!9W@10HkbY;FgX`Y!h2zuA-bH%L@c_`B`;q;Uzv{pl~1aL9Hmc34TJHH~uGvk8^*%Q2)kL{|G`!&7n$T zaj8~Yfxf%SaHm{RTA2FEyxS|HJMJ$%tTof9_28oKi8MvvIdkjAxwFEAI(b zu4X@)ghon=*BT9!UYXDZ3n7SMHEaw;Td)MOiI6V@@3tEsoo(!GCvam1zrX}JMa@ob~o5|`Xvj`Q{=P{eF)g*&UV z)RW{`_nw?1?mD{}G`>AMou57Kha?q5jK>MN4uGGPQ?J`2S-{5c$@3P*U<@++fqjzwbEp2-oD- z+>HFrO;;(Y3>}YdL0gX@e;$y2vz~DJl*+#+$At1EGF9Rpn4awY5pFN)$8G?DjyH80 zJZ$*``?D=o-?hML4tg64Kt<( zj@+gei_O3_>$UAOczGYR-1i!SM|*$u`LWd(^`NxnTC6s*n{=nr|CKfD>olY1}G`q+efMRrKT1U`_u`KF8S_d98%i1%5! 
GIT binary patch data omitted (base85-encoded contents of the binary files added by this patch).
zxkfQdn*_6_Kh0Il7eb@KP6Iu(C*)#0)xY8$EmO)Kn8eq<3k*oZfG$3Y;=sOYY_2Mk zFZ%c2S$w*=eMW~4Y^GgYk=;+HbxygbiTmJsPzJ#wt=O(m<)t9k&S%;oFKk-(E!yz% zG-|T^?k3VE*+C~#WWMog&rDAK`5GnFt*6Y+Vrh$FO%%Q+OUaA3 z&%4i2Fs@p7((8e#-MOgHLgJWmINAby@l}loZf7U(%fPJEg7J6NY3cVeoh* zFo27iT9ka$Qe1~QFhYq_G`A$J6BhtvF4e_br_wi9Qc8)0m&Z*dv%9t0!^x7Xzi%I2 zQw80d>79>_ChPCQ1XuS`4fb&`_l%`~#GQRgi2b?&fX{$O#43(=9ZP6BhbTJ$H%rCy zX#}2d>6Z}e5d4p#%Iy(yu(KF2{8RfBZSJGQ9+`eKv%Yt;5~^vPL9*#)KaDHHU+FHwKXRQF!w@CVzJiJ*brQJ#im{U#~0rH*O}JXsSJ)B!1=Ha}J*c7_M^f z-uuD@XjBcBFhZwhYmQK>?=L7=ojqVPKHxmu>Y|vL zwta414$YL#5XVGrD22Dq#QJBi=aOtKPpY&-m){D(FW^4l=<7iW_MgT(AZJ{BqVCdl zCGyiYU#7H8uZKOT5o{c#50sobA-pqW*_3@Rk(6!G@)<*2M_(rk;FJ@Ua?d&EsRmwa zW{ZECoe2ST%#`J66DGN#6KgnEulFu!$B>4L^X%)HZgd&#jr@k}I7W?k1L~q+uhvW1 z*$Ubb+6ST8)C|cKq6fq9H5@lTc0OZQ`SqvL4e47}`?JJj!`&^07InQr*~P^9D?blh z>uxCDaD+Grbe>T8N0SlMSZ?>oM=yg|?%Io?zto?TJ`#+mcb!8KDoaI?3b%-_`|Z$_ zij0e2+=uW>`IO)b1$|A8`Sa}sEuxTXm94N!HnBUYTv2+i=eB9OJzl3@{lSZKmqUY- zM@>$a^&7`UP6;Y`E1b~m*Y{2-;ayh5pOMf9k#_}hC3#BuC&%);Ngjq%Ucn?JDYjf+*MNo6wJL_K_M&UeZZ zV6byou)7lK_csYLs|^(nf~EsU z+>2m=kU#Sg8L}CBO`62mS5hr`TzpldR za+^nwNpH`mNc7mO)bPK#0Qn>Qcq`Kq!ZB|GDnC&_Yp#D6cO=}zAuMwfLXq;T1dp)$ zEKcT0^Qjqc;)AVT=gM}h9?(p$_2uxcmdgMmQ{b&&g~Ij@wvRMuZv|QOpM3^`sb~UYogPcrUi+IYR36wiv~zZiDQ5_mOHl zX3Z_{7v~$72Wimp4feLvKW{F3s5nKUUQX^IAZu5PgFxxPsQCS0qXRG(FhhhjRa-7@ zT%rp1D%7ewzJ7mG0J`45tMJyXdsBU8NhH$C8wGv_I;AfAkzP08w{~awwC3+r^}XGgXnlNq zkQ#^)La~_PZ*+y8#!W1@r>-eF=XvhAf5ZOmiF3}y{l4p{q32zG;ODR^^Dc3Z@!_=R z%B8dZXMxgH@mgeaJD;=d)*<1>8{2buevw_$(`U(9juU?Wd7uBS+U`)ob#Iji`nGP} zsHw%xAEuxGh%&n&4Y=BnjHHwDS|3c8?T}p`%6J%#xE#kkVt7v^sph6WQ9|k9 zWEifn&03{{dIG;6iKq0qi)K2!HO!pAo1l|31V9rMf{Jko#>bymnZ`ZH1Atlg!7O(Mg!w-7F9NEj1_^OZm@jXEy!#v7}ge?QMXWASip1+xubP3xy zk2R`PtbzsF@#65#5R5FmJQ zZE+`9u_7e|C=LaJQzW-ebyhbA+H9T zxRtuUlco-Px*Mx<33NT}(nHBnF#aoN(s*t@O)!9ufqpaO%VsEXe87+}$09kbVub=o z2Z28x{7^31=?1f@4b?{NEt`*%NtU`uydeoBvvEyLs1w3TVa_Ln5< ze#Rxc>!PsWmTOZQ+WL&&6N}TReH6WJAly%aM+uEPgK~G>s z^VXUuHIbj0_V&}K_je1y>R&&9Q1;%fWX_lWonx8S`G9#ST$7-+QSQ(S1=!$p#TIRG z0hq=|SsufWrI?Ei=X5gG7LWaGW4EgszxTwA=GQNAp63IJoxc_E>$|#d&n#>oCd^Tm zDgNBVYuF12<4*f^3OC7E(O5#M`F`eFkA1uu{3G-^sjNg*J;x&=*}Mc)5( zWmc7(lMXpQ;V(E74PNct7EBIZZ{GL3y&Tq7ob~<6t^W(Ddm%TSb6D%lGujrZ^!euZ zxm_|0roZq8-RFT0Quk%dN_jQnYF_{wO!yeRKcKc|QcBqCYBTQ;Pu`z)l;I04z!U^M z@#SWu-~Kw95tmu1FJr~a;dQi(0o;&&d3;9{`Z=^-BYv-~7H@Vji3?eHWf}UUgo_|; zM^#<0=$Ni2t)_H2N#uB`yg!KjT)y*bXm{F*)Q+yjdc?}_c-sm;w@eB{(~I5avzZxQ zj_F78t)8>+uPNtTvwi*?`CpawDOF}DKe?W(UWVRY%yRC? 
zdh3SVoLNee1lnX_sNn*1Lb`EQOV%9CQ}^yQ9$a z78Z2#hjoG3;#Nl5#z;w?p&@J4W33QOWijTmG!fml>q+xVCr7rIOm2_j&5YK&R(>)m`P8 zf3%@NN8|LqLvLa#q62(y1r=H4_JfT`OkJkF*+}|KB&`WaEqYuaH)}t@Tt}N8NX|0V z?Uy5%vtCZ?gy%#WZk-oDW&i)=5e1+QJ7?exAxUa}a(CGjc< z#6&~B7#!Fdx+eHDW=&UCaL#WhRfMkf)x~z1!f(IlK@GfH#6vv#=1$b(-)-Mzf12xW zzMT`$dhs(xsFO#MRaE*?=5)yjGqO?o?gHJ3(Mbwj!MYLO)fhd?I!Y)yQJjjizx-4r zD-VT)%#V6AJex51=ZB6A=AqOrQU<-`5SrU8oAVvsJe&t!r=1%5_;U$JW0`5vv=nci z4~4}$En`AkVcJne@vmE@W%0(OywGgquvu4S(O3!$CYfV-06Hj+(W;cbo%W&Q)=iFig-CGOqA zXNBbb1=qj-|BW1>vABp4dOEq~n>(+5-Q{B><|f+Y&Pn!$w?Wo(y+2*lMQtd3o=K2y zC{9ag$e2oqO}vXdT|e$Kjw{w=f;Aw3kP-ld(|#dk%hW1GLM&)@rrSJFp@$cYCNs^IOoKl!EVV#_Gysmc#YVm-hUpIsm634xUc-$k-%W9k`*IUIj;~ud5hA%pg7p7y8|z-9|p)e{Ib#B4Rf#43d(j>}T;ZGF`q8=H-*jX{o2f<>y z$4$YCQB*^slqOJnFx@r3Z_+aB|n7#jO`p@bY!khtA8Ljq#t>RnV!< z%FX+he=lQbUp|BgI5(qtqUf7S-{qw*{T01bqr#_9qPS-6HHbr{g2OGj!@vrIC>!e3 zM<&kZcaKbRU50p-e?PS$%0ufSllRIzxwsh3f8$+i>WlhT#Z>j*`DQC%KUq%vbvXui z#;L$Y&Q8M21l&8(~I*2Bu_`1>dpgaY=$o0b8 zVd^pY?b%oV%M9LCSYIxL;T@tEX=om;Pe1}qF)$Xqs`(Xi%GCU(a`C5$Ky+(^V!wOP zkEHd297WPs=&s~Zgev(0YrNvkYx#$+qxTOi=)nT8vO;QA@8Sp1pElxslKP%+K$Tlu z-+x&D2mRZncD&(n*YWI+lTBs~&a)iDZbNOebRupkA(WArwxBr!Hw9KsH7LX_1UN{;O)$k(^;7hb~awc z9sk_Kw4&piYd;Jf#bcUHqSq)2Q3^s;lo%K9gR$ebI{Sr#)-O%`^K1Elk%7aDj=-8< z=W~a~yKS9L`zO{{gDwOV{=_QB!{R*Idn;zqS_q|FO0| zL&Py!tl~;OMG3mp;%v;nX$Gz(^P4EN`ojcf7hX*EdEtZd|qBx1tWsZ>O4%Vr!O! zuIopyd;G5%??II;Pb^kp*}7trur!YBFQa8R5$I8J#_LhW zB=M|c;b%4IJXRi6awasOF0V=E)Tgjo35~6K=K*Vc+Ek)@zO}fn@VZ>CrUsTB$c^o2 zV(MJgvMl>@$D3Uw`nD2Cb6{p=4ipPg6Mfs^*G|{ZJ^d5WTQ8F5DZg8Kb?`K$Np5d2 zP^;2SNR#^lNU8M8Cm_3V3v9G3MK84s6Zix=g?}J8RDjo-9r0H9%!-fwR~wiDvq(zlSs^>*MY+o(Hu)PA~@&jbwGXF~(1 zu_C*SaYgrSY3oRd*q*HtQs^7VS<;sB;=Jz^9c&~e{Dtn&l*EpBB5ZjRL9jzfEcwEw zJCG&6%3>6t;llMXnMwJ@3tlj%H2qT4 zN!Q2HWh4QZUU4bi;zY&1v#rEF;INCW$>J78-r~mX?vC$j${Jo*55vIxgR}=y6*Mm*XJe9x$4H>`2Pht@wSclGylM)4k!Ki40&8 zUBI9nS@49^ysEP3y$ulGvSl-*n5w&pPcEMkY})_jmZse&?-j#wmQe!$PMvQguK`NGi<%#fx9|$nSn> zJ_yBzD%L~EEd9MN`?9X$@ps?*US7Xsy2bSMe%Cd69O~b_jDUvO!dy<21>|oMmUou( z&1Ny6P*3Ba^}f70jW4XX2b~DQXlKJo<^NPy&BvARkPjBpyA#sxo_(*8c(M;!Q6{J7 z=d@NxZq|D}=iE=00-1{@{iZiL!M*smQ!}edCncBuHMC&)cm%` z@h_M>is=RUMJnVY*tm1oDmc52{tY~hpk9vZo-7fZivBqrJs0GAp`|yGXY|+@(cJ40 zWOKE)Yd=9@HpZSekL)+(TaT*-zKGiHo2HMB-;)WEUN)7_oQk?^y|G`Hsne?E5MK08 ztr0?%zbKWxr?P~awH&q21@DN1lQb4wpp{l-I(kHNvP_3XV>%*APn^2`2 z{)&J9XDL$Badd;cDCO}hu2`UWeL1weOh6?`*X?H-uUErkZ^L27 zBPMILeM}USC9k4)52}6nQmNj-UVWm-<1Kp)b&VTVGHW9b`9KCwP=NH` zC|qzI^d0~j<+c84}|{)?4AES5n+XxPp8?R_I1oYT;v9_ zok!`Ll`lJNvT+TJ)3k*#r`XiP^p#GXaw(di`U2{#R$e&}!C=?eOxD}q+Hku)iy%Z( zd=?F`V3%%Md70|h3Oy>%#iYksUpuO_*exwUD9X%C z>0AO3b0$cAyT}i<8*kZNY;x}Z?*25G_CtncrNM#mS*$qW;pmw8)LxW}0&ID&1?X1( z{AYzj{(s8SZ@R08CnD!AUKm1f-R8X>Iv5=={5!4JDNfPPkHsXp>OX(|zYnd4RXLd|Rko%eUh&-Bwu6g44(1dopJ-BqIjtF09I z;1Bh@YE1KC(+8JV(I2R&!SrmEKj_`Z@J>fLr=2NEU+BdN?A5(Z{A>!Av&?V`@`GXl zOj#k^0EI%vQS3yiGQuf+4eZz0hk!-E+}mEKQoB?f`rDAj@_&v708IefkN0Kff0;ws z+N7ema9iSmiUFTHTj~^v8^m4H0>kq9MIaz`JxR%9X#z{ZqsJdez8|0n_$(c*x#RQ2g;4;a z5qouUzW zs{y}v!RbouwW)AJQUPuCxYyak9F}dN{gxZ*y@`u3=se$SO!fAISzPP-4y)Hihiz~? 
zOvY0ica@CoiMmN#I79h4JI)*;E|&B5txP~;R$o4bsf+Yx3cqXC2+72}IUXemRr8se zrP^yE`t7@o1A8Xyb=A1hXaz3dMUq%r;NgS9N^S5V1V=da1H?BM4+CPd2>!Pc>XY~7 z3qG;~(T;QVQ8ux4&p=lghv4MpR!>+WQ`?Z!8s2%ui_O`gYF}ZMbPrBRl^v`VNoc|> zl462!mEroicD>}a3xq_EVVk9Fuz5I(l<>r|;OR1h_XDI?M<>s4#@cl+!*ES9&-)w0 z0bzIm+>S+KFk~w-$CWq6wZdt+EtciDpRJFpCEnG=fUV<`(s<;c+@M`%w5@Kof_&ay zxT5n;MxH9Ug1C-RkX8GhOCB@>XV7XFb!X&{`TURIyl(jk&{;TZOftJT?EltsD}IM( z-^}VOi_lG-+dVRPEhVQKd^K4&iac~L*#rI=$65dt@kfo3FY3{UK9A)9v-%%b+Hs~s zg|r=17O~5yryhkuaEcK2P<`lT-WN6x5*3Q&Ea8Cp)=a=^eNTzLlI~DA{Z#m4(N=_R z(sN8aq52g_`(Hh0Ae*0!Kp{CWf&Qg@_o$cfJrz+@ypyw)8Jfs5lV0swV?8nbsQ%gs zkE264PgmZdef3!fKWqA-3p7agOVjB_Z)A}3ZOj|^*M}Hynp3XxNmqmmp=iJVI;9(% z;AJEr917ySvAh#!W(-Yo+KMJWxbGJv6j)!kK8YQ09inol@Y!N!AOH?-1OUHBJCqDn z)wL${JYpy*`NL;`Zbwg)r6&lPu5x*NQ&h0uTO~y+h-O7=Lrko=FrQ(N8aVH2C)Zo{ z^99)P9R}W~NOgZtr$|j*d_X-KcyO||w<)h+$9k0B954*o*RO*AMsVb^69m9hYznMl zN-a>D4A>PoAMve+e%;46bg+R)2lrPwSt4EkI`DtP)~QivIi<P7EVS&EcaBARRPJa(E{Gpsdd-+GF-pHDe07mgryRp9mRUdGTr2Ii{aN z#!r56sx>^|!p}Mzs>1*K4K5B_hT^kG0{2Ec(bbsaeM6VSv%}o8@BTN?o)EFO4F;L` z|8My+lI{(_Yllsm)b3YM{(UbG$y2-mhF#K@nZ(Kjg}C|UwKx=btvH!s)m^a4B)o-W z!R$Ufz!Lb2y0WrT7NAQFka-?9CYl=N_qB4$Fp5h94#34`7b9)~U+nTf$0;KaAv!uS zmcjNc#r{gj4DQ+3KzC#@uEOjofMb^& z9TV56->(%;(|q=s@?MQV9P-8<&Q`YAEA1VOc{I~y6cXb9lRC|#y!12 zKDXJ&&GGveWH^0w&9PB;VY}WLecJ1&@L1ei?B;frUXrXtOdS=M(d(`OcHZGuX@t@~c7xyiyv-k)sGa*ceTM!Z}79COd3R%uMj zD0S914idLQx}FfU5l#GO0IZFXIC>4Q70x{ykhb(Rv~r>wn3WB3st`9I;NT*ot{8z(c#<`CtoIDjQm- zdKdhY^CJYWmrtmp!6>Qb&;7_Bo12FK-QyJ9J;G~3K8QXQo}P#P#-I9CEnF@%4{KKE z@ICEy6#wNn2zS4n@gh>1E61ky2b8&2zi94_ZVKMOiyQc8T0Upo>t`C*H)gjdl)X7O zPI>l0s`lz`K{_6!%xrWH6uiVR(%CQB5ZfRVIA0xF#>|QiZLWcO6Q;qD{Ah%E{L#ZI zny;8z__(6iBFOW8D{6VDZ>kyw3BwGmzPU}JbI{rg8`}t6vt^vKruP2K#EWR8(2;&EU4~h6D z>ho)QzKTC(u$6Jgc$CP@W)oX?N3zYWua6*9mu#0xiorey=4i#LQR9EwkZ|8b7~S>H=& zW$Uo^M@zZId7}h0Nl@c97CqkQIlbBs*5$FJl(kI4%~h^!>D0>Q=tLr{A^+{&z>3y2 zUY@_CtW$$LKCkrCc9G_X7x^MUM zT`C`6C{M=8lyG{&Vv^2chos9tw|3wpIfc6<@bdq#Z3KWNv%V9b+w}Yvo5rh45b$(i zjF&kTa{&j_cannNSx z+@IP}LtVF)E?V#h6Dt=zBqcSGiKN6Q1J$Q4I?inAN_7ujx&U7R3Ypm-GVU<5{YhZf z$gtW;FHe1D-Y_1cbU7~9<&f+JiD&kXZyW^u@DXApUWzG?o;Bh~zI+L7U_rieHU0Hq zfLn#X%VWyBosUNdYCQq&{kzw9^phkN|F4pl6=zE{Rk@6tIZ!5f2i$`v=hCxYVv1k^ zdEo^m_Ak)rOY0!r5VqwMazUPNkHKHK4^Cu4&osO$lr~nKqJ1b7^ z-X@V?5(&+$_=qHeTwyVai9wh)o3VK0I`ExSFV72-yKLzkJ``%kOA*E1R`= zK<_mTC@s)ym#{W9lb8AZw6@R#yc@7N&+K%QI5 z#plHnC7QD~{q*W}h+!wktQ!YhE{|t*j5!RE&7VoU`^fF7Q3*i^OZipC!q&>Sn~H@> z7;KqX!3xn1%ntv>1c1G?o5UKRoX{Pd0 z*COe9D4Q{o#n)ecQ>K$TQczKhEKfqwpt7cQhL>UURjEJGwasDg9oH?i;**_)&7@K= zHziCjV@Yqbf1!@jIyrsac$aIwUUK#uthHSX*(lR!P{}Pj`X@j=KnTc!y*e(&X;59` zRH>K0^w!6QWV$WjY06aMsG8whd#g8AaJQOrj|xoea6L1uvuo>jN>W zPS17TU`_duM^?;lN%-nKXS6C9Eh`Y2oq>z;it>qT@-Yj&J%hRb`;A!@42RvLihvJ* zh^0MxvEm21(7SL~ssDaI#Yy+9NadZakV2{TKFj}U6aGslCUmgW<%%%#9)IaEs&E01 z2&)>WHd%{by6V_ZX-$x(U@vid|foieSfli)_gMh$Zts? zW>LFTg@u~s0DMd&g+5={nL$32`j!fl zY9&@QPGy+4vL8qyNvrc<3g4aDofPuowhwR(bs8+oqo{&K8KLfN@1Gi(kKO+R!Xh z?xd(L`5Zg}{+K1KuHdcX=l!iOXN=l2DZs0`od5f%$E(;EY!N6{!FgsDWVgbZ3m8# z&1E(EB!OSw@-_qth1k@sEjeMx*Kk*e~0B_<|W@0{`-BoR|-JZ_w&@t87(%b^UD zp_+mWgM{HCKK^)RuG>ew?``y^W zvW?XDe$xqpRN^XLuy~ZDL!f?Dj64icX$d(Pn2?QJaI^z7F%(*<62Csgd9#H@sT+vM z?5LJZaNW|DcNuaAgh= z6ZMBOY03sT4baX>MO(TFYG^@0k?vT)Dtbcn(5DYs-iSDEI3(=MI(PPIpg6(mo2CzOy~!Hc zbu2^S){keyjbcW6f90Z$VHjaZg5y~X|>Ccbm-R`hvzupN??ZfU9yFR2E$$U}h$@|U! 
zX;H~RII;BS?hStr9G-mqU*-SU!E(Rr%6~#hKD31Let2@3x^G0*@#oaqkP zKjZj$*NDf4OPGY^zb!+TU>2G&yC0w*L!k!#az$X{$oe$;FNgI49=%`|M*&|cu??;x zPI2OEz!soa<;FvNt^dIPOISG$K-B#k3HY@Q(>|ci#p)xDh?7vin*dSA{8K>41bf0F zJ-D&!t>~r0Bu-eBv!!!6n5T9zxXU@^_k!xh98csqR<639kSJrxb=Xmy;q&9~C!Q7- zcm^jQwQR&YljC*H4q1*9ClN`*3>VG>+JU=j&|!8L7|}l4Y5+@@397JjL^$}HHD_Dw z<0is>kkE0!=M#X@iGEiw7hlR?@iY)7=u(H5C4@w%R=_DX zQ+Gk^gn&gCk%Z%Mcc&?!!33?m^&gB)R_}Uwvg~6o>CDa~AM^(UT1j`a`#CGRtNaR+ zL^OUyPKM!0c$dwqOx@$T%KGQuK%uo#D{ZlqC$8aOoqAd3Tdp^;+K{NL^OjhOEEA3V z`b6wn!^A;)N*J=?^3U4K?P!HMtfp|x7GDYu7HSdAkLXg|8h&D5zc+7}6Pszf>zG2c zt9sskDQeF6X5;BB>Dl6opI0xE>Sv1-7#Z)}g6AGM7Q{5HP%Mpj9WzN9uM>U^hrO-{ z=wNmLE4N17PbsV2*NyC1SbKlo2cn}0dRKXs`0qC5l$eqw4{qVL@we-B&Hrcbw(Yj4 zYaY_;di}G8X!<{2{@MzO2cGJ3+>F^-Y7V^l_~rfM$vz+BCvECoOG`^eL8}8E1AW4= zOAG{kUDK4kohU7hyMJ28y-8dgir-#@f9V-jk$--k=>D5rwF@NT!G3j;LQ$=E^e+E9#ME_6v_J4T{(n+pkj2;VnghhwU=QP2RHZarN2`wcMxyts#fyy zVUT+v)iDH1XYn7dE*kRXafi2zW03RN?PXTENepY!-nyR)gtfnicavC$-pqET!tN_@ z&3sy4ejcbs;nXa(CUPe<+e>9d9?bspXrLG03UDX1{GN#;@#_I*)cc#h7iA>|c4KhI z|2CDLWEQmd(!g5p&HK7!q$<>s`PQV`%ilYX<8rdxAMM>sCwB`gnHX*Y1bH}>#H@;{r6H75v z*L|?Cjld^}=A59%Oda?vXk8oVm(LB0?koW_;nBcW&;e;SY65TgrDGS0Sb&jmsG zfO?^^ZnF|Ofz$pJ*+ajOTF$&I6QMG1JxzLvMGfej9K#oC31Px!rIp-5KM_gvk?xWH zwjHxcNbRAIF0Fl-0X|wTM!7_PkR2?OK4$+712H1R^L{7RW> zo=vhg*MGuA9jy7=x(78V-yH}wZm)LxG|tLV*i_LrKyBe22&jQVKcD;8nZtQodQ8E&_Vma7OI^Q1Ysp{~wig{|V+iniX!gYXDD13C7J* zw~^*Wj_GnnNEH0PhwH~D7#h;{Rem=JYjtZ!kfQBSVe?gIsHEDd_g>24YYPn$lNqJQ z)@pGOuO&<_wkGxy{Sz!E?717EcP3w%@_D=H)6c^L`HgWeagQ?TKI2pA%bdFy;N1iK zm37zS<>$t4`xyWdT{cvK;H-Xm@V@+`d?qPa(dUu1PyB2y;n=SUUW~4Z0zTVdGoi*k z<3yTW^;6cM0W{(SIvnm;II%4WI#*7p=EJZMfFeUR;P>HzSsn<9W%Wn8k?3_fWOjw# z__>vmZQ^xMt=#JCHt$V_tfyW6jKp1PxB^qf6j0MseC5Bi;H+u|-5F&aOF>K8ZM?2@ z&~`=|*RQc~@On7eZO+@7#9I}$t-sgJ`N@3P(> zBQ^0#d=|ec|J?AqKzun21vo+2 zC~nW$*7xGapeOTU=}#P9B_}Fm@SlI{D>tf;RFz*N{rD;a=iKI%O!;`k(ua5a5LX%Lz2kYvq$R$5h3EHB2^*$;H=e>jFG_foXT#M5-9YO#{f-i)y`fRk0O zc6umnbT;`FLl&;{ZREw@VOrzUD+$`*xopvpLvb#Ul_EXj*rjWmV8z>7$2xu=TP3-O119!(c=4^~m>5OkCW zx=)b-XOb*#%|8BV2a`p1O|Shs8QB5zAO|#)Qe4f!YG|;F@fo*YW%-{?FD0eunjyoD zdXDy^rUd}|_h40uf$B#A}?~X5f zse&{;Sn@JhhepqX%MyNdT>U`&4n^ffi&0v`X{_*WW9}2)-7EuRSH%?_(bfa?Er^_T zn#kj{v&5;gt(k?RykHRi35VjiqSfQNM9G`H(Hh;90{F*Vnzn8nOkdHb!w&srL|jmz ztTab9X1?C)QJqF+KJf(u=jZrM_^;KC%2NSX?ayxw3@2FIpEL2W@Nmv1Wa)KHq&D@w zvT{c2vHW}LYGv=NzVKXVU1pt^1_`C1gqUrC1hh-;ghM3At+cGqGW@kWGn`dAo#|Y= z%;6gv@4xu1!wLhu%g?&*`g;y~jSY zQiJv1j!9G8{kye$7@xP?DlQ3fWAZp! 
zdXT3O*RFBGOXoGP5AN3T)_2Kk>n$<9m&^4$|I3&YS{T-ObxyEaFLfWfk(#>|#XoZC zt<8ErrXd?kKkZ!h|F=rb%UApnaq<URF-}(2q;qFoA?wH0#$E@+Z$J7L2vH=r`SpJWYct_X^6xBg))*SKON0)CwY~Fpd z?1t>4fTYInW~2S;_^oqX?(I0210~oDT}onKYzc5?>mM;FQ%J|tTP^gfB5?n{1Y;Xq zk(3fz;tOCq#Zp5w@|52w+q;?q_`>3q9Y0~ez0wHna`$>(a=Hdlv~-klbzc<;c!_9# z^$4U6P5my=<7?*9V~T7S2{}RW=6Bd&lk1BZ+)KJ9n1id8oj|4jIw)AQCt^saf#`*l zE`rEL(iI?JhGT(+Kn?n1bq1W0WWfehdCQ3tXvQ@0y$DQSc6nq+4)Tzgps69bUw*5M zY*%~|=|d5@w^i|4jbI*KJ>gwZloC6A<1O9l%G3<#>kwu*gGAiYh)XI4f8%@?7#S}< z@UnVNZ=gK)?S9{ev2{LgFcSX5Eel88q|3FdEl<(}yzOn@;Zj-a2M`A@UffxSI8XZ{ z%hO5r16Cw+^=jmY058u(nt#;^93?X{1KR$Q(SX!GJec+JLxSlMgW3IP z@oWq!r&l5Q@_3@eKf=^&y-<1!sU7D7HiM1vnun$$3kTfEY18OdZMLK;$p|=$(?^fg zj#NH#VW7+5*kTZqYZ!?QJcrBXAsmFXrdlNQJBw6svgT^$zQ1D0yH3-`s?5q;vXE-E zN#)&6GF4C8l?2HnjhvSbZ;KCeO{rdGG0$7FG#lR*DaLxAexO6K|Z7*)>}^263S9EJ{O{^ zQl{dm2aj0I$`7~(!gZf2s(e{7IcBccqz+|lA-c3!NPs_5RLG-h;jMUz-oOMYQg1rM zbRr18v3Vb7@36Llj$@E(rE%h)yHJWA2}V>hwtd!zmk2@=4ZXKf0{qWT#(S-A92I>o zZ#$k?&~^r@+-j4kkZVY*2&ulOwz!OoAHIJ<`_J##@G>gMx#MT3mDvj!7xMA#!X3A0 zW=u-%uYgx)f|u(z3-YdF@m8Dj$3@$}n=zX~?j7QyR0`336bbM~M*JOQqhR-Qr6SKd z4<8Su-wEhz*Rb?r&L)j76m*Nfz9UBZMNnUSAQG3Tpetx^LkS;^31!9D;F@fEzMdYV zekW*!I=n4jvF@$ZJQYC{5dQp1jp@ah=(1J);nKUlD&gQwSibj1Q|=Fyq0OuHFbA#B zJ(ay}MKAvIIZ-{IGC>bqhF*(@&>tCouXY$u1eo8!S(!w!93Y>UmHM^R!Tuk>&Ik$`;&fGCK)mmb4=Dk^**iV$&%d zv({yZ_A11Wj079f=x$1$*yo`LQ@+%f-1FQcA99j=FHmOl2(6NgW>mm#4%Euqw_4aq zU0~C1SM%m8m}gvL;;oEt;Z?|sD-$$5T z)P+WQaJCIqo86^V5&gI8cTD`K;)(wsr5x%Dmrs`0boaaP8N_U)!Y;f16y(G3=l{{m z{_n4If714cLpR2DFtv9j_4^jHjI2=W)^C+)EN`4Oy+thXKmIz$6tAg>NGwU7zQ6#a zv2$(fu`MVy0GR;$j9&n2OHNC&T7qnRTqk0-w3H}7KHzOv7Uzwr^8VY!Qcf7aA~_Y|8m>~3y&$c~Mr1xheRM#w8{N&WHB3ZAzk0Zcnf5(t<)z&n_2 z09bMMV%>trY?5*`S+elUxgDs#|Iy9jS_itaSsC;nGlR$RzP*Tm5n+$v9biAl8qH|b zVqxS^4tCi#2|abozBOwUD5L`%HS#+)^5p}6Ip&dpSHM2Tvk$)Hip|(r{Lh!8ui|k_ z2O#Y@TaxC0itAV`Wi6=#yt%=MX`TSFdDid49vPC(&d2?!OK8t_ocwvzfJQz4Ry4hh zb)Onh?TnfVOQ(jLdXOXs(s1DFkk8xJR>0RX4!T(6$y7)Fi$N(6n{QBdWKAmetF!aS zCxCc8(Inj0*)wGm8tCmZkBrr4JpCt@WgQ|+^S5DKU+<(1miT5vj*-;cAvZ?hVp^uL zEaF@9A*OY})7=kR?BUq+1Sh?|&vQu4@V0A{Fletw*9&`H(yp*W<+OfVq=2f<`4lGB zs>)(>jT*@W`YN|bdZXXo*76lThm zvnRdkP1A_v_NZL19F_(RMptS&A6RR`_A9mn$hp(jEsy+r((W@HLJ87OE%53aZN6az z4%-d?xGj%6VLmyd@fn_AzTEB$)XhSP5`98t+|E4)MoOymJBNSQ)7h1jwW#wq&qnWg zt-f+=dy1&EVA+*A@BQ6F=Uih`+joz~6k(YLY&N1x4DUD2b?sjvg*&c(cP`II%pxZL zM}wGgviw{6#^Z)4h8XC)zkUg{x6)-VWQ#b0!Jh{CDwd{Gc7MKy#}5@8V5}cjVikO% z=x&TwP?0>}>l=N(7xCrjeELMU-ow8J2-bdSp^kiMVEiUEU8w>0CGI!~j}MlU{_%q! 
zlR1?UCi7rZI8cpI2eCl3Pbv)Ki|@BGU*l3@TzBpxJ4j6v^1>M&E7 zEz|o|Az;)w=I<(lhHTLLlfH}dN$K9))t9$NXR*EMy=>}oR#dG%@zVVl#`=8mPq~uQ z)l3+q5`ni?G_Bi5oukVhgqOAc5B zjA2HSa$lKd8$kjqoP2;l9fO!Zj#en%3YnPjVBHz-Zy@GqYBzt&bn!1n+~-No+-h>) zdj#bBmMV!Pe~_ZpYYNCf(>5Yrt|0?)6n1fN@mwG)%3{G<@dM*@`sJu2GU+=sS#eAK zh6i=7U3MT5(9GB1IB5LOB>M&aXa+BuI7$*?XpR60C6Cep*^I@f@CncUoW{6GQZt1?Co4=V7EIJjD;1P=N51oKE$5oqBGJC zPA||s_8kfj;f?6P+vOq2uK!jb6bx}d7FU0f26w+_cK6)FIsV%VFhy+fq%-t1fLtd3 zK>_yKY|&Wy&-?pdXSBo0?O|xBfLW{f^r>6yz(Xp`;9VZ^BWZ$DoJ{-1XDa~)Xe~`2^e~^o0^G%Er3lzJts> z%K-2vP)L)spE@h1rPMc|;W9L(1S738hh5FM=s_SMe?@lxTYw?p85=eIqgsg&0Q(rN z^!B~7-wMBRKj9XC{$1%@Q|zNr0AAQ=uGlp0Bg%4{g9%{{(%M8tC>}-GVoDa@wz{v7 z3>Xu)H_5X?ze4n}0k|f|P%heX@l6Fx0epEKGn^sY-xF9FP%Oy)(gG7l%_Yv45G|S_p=lj~GSf$ktcVPh^(aeMZ=A5BDiNlGXK4**eHztbc658?($LZOhO2zrhCjr7ba zpLfwN&CB+PUfSD{RAiVV7NG&e-ne(Tm7AB@pC|skM@_8k;S*8Ed0XlU9!izWS4!tr zI+Zx^LD972otr>9_Vw9ss-jxhKUzS(_0?N*Pil$QayDdwD@3-AL$nrQ^#!OyR(r*Z zUUYnD@bd-V&FC#wCTI`2jEI`0%)!^0t}$i#!#oR0Nj%dUi_>!H!~Xh<--%7^68|wK z>@7Y)CelB^ULT^H2|ii4R7%xg;{i(VcY%{5TkjBf?WZ}|`W=yOR~U-!bL zk$rawQ4+GFl*4HI*wM5%@DHdg2)Z8S__GfH9~|Sau22>+#OyVf+H8i<6eyLNQMiGNJ4y&fn0g1n8qU)R-Qr zV|Ekj`yo#BIqLd?%jp(w8ehS+DWV@Ugz&>zb=(Koj|tm=_o6VE8Ov&;$ow8PtuKC#;E0<(e%%r z{e8li#XoBI($~pkPuHG%+tDjW4#>&OP_PN#n6u(M;I6?)$76;#&0~k<>v3eg^a=;B zRFuDTk0oQ?FxuFFLO_|EdFdVg0M&D&Y^ng5xWH5zVTXgW`~w%vZo z{(F1=w?5&@O!^>#3d*(DdNgc~qim8H8AO(B~@H4{!8I z062LVOcG7Iz9rM6?AL<@4gGB2R7(*oYcAr67EJxU8X-f0Bc@p925 zm#auNaYo0{7f`{3>~VU>FN|9xTmIrt7@LR>N-0!;?`34!ik1cK>6Z;cU=tSX%X&n( z<~}C8)&#m5w5!_k13N~GuKhJa^BpQ`ef>jVf`<%txQ-YC(VOi%h7A&koGGSqaGl50 z31f*h3HQ=0HkyT=0Gzo*m+2ADqUCSgqc%bBn(%k&2qguiMzGXjjiIFtm_lUmQle-&Lx?SZWXlDSXM+&lrA#H` z2nd1KHkZ`Y5PRrG3U_yUdh=j`x4DAjtFb6v2V5fTwOhEOJ!@^62O^UQDF%?uf0~-q z>)_$Y&vfdV z9Gs==p6`xgW3t*T?4-$|NWt+@Kxc8u7 ztu8ik1++%luAFZqZxia_u@uxLxFUU1)JXnS`zcTbRksb|t>>Eg(ErlG&4fQp2#@9Z zK&#F^)dl-yMHpUYUPxUS`4YVy46Z&+`?7IffnA?fJmh$hP(9q?&rhNs{{W|MO}f?Z zNj#9!KPq6!Y*d2|QEl$T@?+QS)x6(gkp@#`wLDp2*2Hf%`hs-W;9h!@_e?5&Oa@CS z2k>$rSbXv_Q*;LJplHsIf&#rCb7Y_Iw(LmJKHHjle;nD%&n&V^zI09=oJhhPhssuZ z5vMd>fU=2KRmT8)nDePa=xj4r<1w|Q8F|Mgu{osP=wI_X#&7{tLVMRV^I6)&Cz_5= zj9@N{*=MF0+U!w+tX=_udVyr&ZHOXoY3xv(3MwvBwS_kQdzUiOWlr5*$_B%z@Gw;b zzxsuOAUk|aH_U-??2ElB*t8F?LFK^wgE6~B_^wvK`P+->YHpzz->zo0O2G?V8KNUQW zP+8V1DS=PP(}agoF1Pa8804!G`=Fh=y=ocmlkW?iZ}=Cuf263C<6x8rduI3)UBZ;h z2I_UO)al_lF2>#FB!3e*mk{cG0b45a<%R zgU$}I55C-6rl9U%cic$~1f`b866Bbc-s|J;b%2?{TF6ZtIliy}sBx2-@D5%mu*hOc zSSoRWDqbxSD9~?C(15oITPL~b*Os+`$kNiB!wLxeJ{vzARirfx=hq3Y={HVn`W`cC zl8*Nc?_SEEi-R+24jRdmv$UjIT@RDL@h% zM$_Pk#Pjfg$jwm`XZr#w2Nw9AR(wwYgWeO<|8z3B=)8;t}<`8_j zIAADZxJyqmI0x+gjq*;JfD5tsSn-aR=us>;pTBZ1cmI_8-BN~v<6FRU zD@}KyP>TW?$klMw)&evrsm4D$0*zuzjuDyWu)Ke}v05nazl!MUP61Rhvx)}Kn%548 z?k%j_i_e{ffzA9rXDt^!Lwtbvcu-Dk=zwA}at8?{i-QpKdsJmTT00*ze zGNG5~H8|nRhKCtN6m|dWp1{sv3XutRSV2AR>EY*Wx!*mM%jrh=nsioK_z|WJ`y8q? 
zRe}r+zR&tp&2)90j)#rg;6Q`MOR*>LP64+L#?V?*P16`xZ^@lXuJZA=+ibzb>f{_d z3$<@&>A)-|zUnO5kUBiVeubK0-uF8}WiWV;$PWo=pO_X7y{v^kZx1&0=P3bi$0g;M zzxu1UC}!P>tBf}A2CSgZYEfuSA)HIJLZ_MO)DB~-nYn0stbYm{thspXn>j10e{FzQme z*aGj*Cb3>&doAgL?o^z(m-;cFo_~XOKt7i?CN@c|+uj9TYpfRbWw9wN02T$ zodJUspj|}PUI=!`Y$aCb;r~$lXroy0_v2ry6=OBt2jJX_X3>VzCM?p|g8M;?=gM-0 z=|?TGi$~hRQ%8|JRhA~+WO>|9W5D0igoXU(Z_MM+_Dqs-hP^){hjip`zEgfhHu^=Z zIpz*Aj(>b7`T9r+3Q;)#`g$s*3a-m?cK@~5Cy{YVFjwdv@J#u^XR{Dw%Q)hn-q0Jx z=HiLsRQ#pDMdE3XsHo6aUagt#d!ArUr7UZE>^@k8XJKm{jt0{qwcr1$ie2PR6IQmf zLURjGXNUVPw>|Dbzad{y%Y$3j@)JHRIbhyJ^c;TEZ=@rC7@vhHlDi@@3%|E``nS%0 zqFB#+wNfYlN8o-b({%LnkiWK90)hWU{UZu2K^>3C6)xr>lp)`Ca2AA{gV;35YD5?tI{}6+j>j*NNZ!do1 zDG5#5D>DS10ODbC3~iDMv~r}npfB>C0L`7JB*X|ZCW2I;G%x^WfXKS#1v88fMUpP^ zb^_!g`e9ZcmnvTjVqyBf&XB6!*v z9(y*cYOnC-<)^6|iL3%vev@Z)lqswmf)-NvWjK2ApG*-#?tFOxdf4GeV2*dC6W|xP zMo2v`Kn&xA?Jy(mNidxEF=^kH3j+x)bUDl3j9u3Y?=b1-NLl?%s7g5me3x;BT^{E^ zIiR8wmo((ro^auJX$Et#HIbeNx)o~N_^gM$twnU9>)zrUq+cp2 zSb;_3lgbQFP87b(%#RaY1586r6aCTU*{m7?7OQ)*9~Dhi83Y@Oyc)D@b2aWilH`$+ zkV|?cqE_dfj&d@3HxGGv4OciyX5>k(aLLxGU7dAsu5ugc(g$|r5+!ef1<>@ENOhTY*l;ThiWPy*&J>z`N(XKC;b306P1qKZ&)#*~BR zW!*scJ85T-ZV~wU4r}(ahanlt9`s|@sc0xAuKWbBpSC#}+;BQuswZ&hmVtK9xgLw* z)vNT_dBnM4G6nOY?+x`I_TZqM-#`8nHhbo-8+p;YSSFrLcf8W}X1RA5cF}atr8W1g zP1_Mi$@)4h_QR7C8U7|_Pik;j4v2mKX^Q&VBuluPGV$OPmkx1eVk%YFgmiY-%;MY^ z#s#x@a?QEIEgXE`E+)za>F0#(HDRVZUYpH_&ESTp8?L``CCP^FA2jf<{kW_$38C#2 zyV(-7zn&!Mn)y@GlH7(3ORU&*X~9BlZEezCC2LB8A^cx;A<3?twj1G2v_zaD!T=Zk z86U@HUJ5}|TN~ZZTSh4iMHj=>lXf=y*pO-~Q!S}7@{*(^RQgmMRG@ohHRdO^681IJ zDnl8&=*Z~hnA4rNUT}DQoYw>43BV}uOHvO9rhbcmOwNnRH+~rM#gwf;kc3kCi2e5R zq4JBs`|_Vz3ii_~im1ZEF(oZ8+k36%{)Dr{H8N@4)$+XZ50jDSkB%Oi*HLC@#V%c$ z!MXB{x*6xd2GJ0Eat!ejitQjGp?tos%Cv5Ln64Sgiy=+TMVTuKZb*m|OKQrkSZI*2 za4mhR-eq2W{ENm&tL#up*dv?Mq+_TQUI%LbaoSaWQ7i~ARpSGT{-!QxGE0p&rjX@q zKZ_5RHkgw#mEp0G5!7;@6l0YL1K+Wr>F!NbEB5H~nWHCr(OEPUf4R+7u8|ejI+({~ zyF~1`q0hQ;x6eb8JdC<2l z+V(}e|N7K_;+jzVuy)P6YDKIM>NrSxbnM9SUHH!9HL1-DJHZtROvb^4%SFU`kVYo! 
zQADd#MXprTG>-yCo;_HdYvbD^{{pAU{gtmVV-c||6xLFLS8JZa$Y={f4x&VrXok5CSZ_wW_2+9h}?*P zY%87b4&rWYG}`VRcOe<0Pt59}aqojy+Wcm~KG=7}k3nN)=rd%cn{ZII-m;Xv25qPN z9e^;EJ^UKk^ST4vr-4)zMdmTF%7wI&eD}{ie-R+Zh(dd@9Wth?&>)G zh%RFFW1@-~u~*}5FpFl;1q&6=?8CIkO{V3_CBADW;jD-sh-JLPq^d6}3$8?n{;==w z@2)H0|HKcBSQZ1cZJtm;(r3#P(p;!OGqs=Cx!)dxh@I&k-c1g5=5g%x7miD9cj$Y3QdT;)R7Df2g=j@#h$1xU7lUlwWSq z6H9ME5oRUREEt5pr`|CC(Eh;lnpf%BM>btI>RnP8ghxHVYK0i;YbbjwLhDf^JH91*Gl?IdBQh_H`qFR*02KX zpi*igcI`^ycEsUV-5kTMp(IhbPPk)VS^InPRpO>ZwxdoHX&FO;7?qM0DGOu})M)Ll zN3(gj_S_kOpOX2)w0V~bYk)4npfpboD@mzi?U##nc8+dl|v`s)Do@m^}XmZrS3 z8SOGiH`}!Xbzz)E68QCU&WMWu)79W+<&L*x5_7fzS%BXZQ46>lqEIp?AXE~Ph*Un*t!YQ36%dwycHH0OI< z436u9euT%tJN1x#7j^0-*V_ZQ2;wW3~;?!feZu_aTq=~TF^GWIYFTS_PTh&Y;+ikEi@?xpo~jDuQS zqUH|TZf0TTv(PLmk56Q9wruK_*mYdEQR(S2TW^k|58T>KEq>%Q^+WeCGJPXK8Tn~i zIrzB}@%L#;C@J@7%i&zL4M(*ij*Xv4XD#rWO5NIKXO7~yuh5+^s}S>?@$(HM=EGb4 z+0VE0(zE}Z#5vYkuuhaS`<$URt*`&+a6T2LDz`^%R9f1PmE^V?fx`XJ=a|@no67>L zDG%)b-pBu1x4b1(zWPJeGbFpj2C|H_KU^!t3(Lqs~N2Y)BU0{%EF~*gJeY5z3D}`aV@3?nw;YzM1F=r%45wT zmLnTr4uGy*yG)-tp@tZi{4N;^A0HG0%)_;%^M*D;s}1ZVCB_5@-{7uj*Z^SwJ|&RD zw!v@2GepU)OM?=Eg_>lt&Dp}^tj{`P#m5RcM;ufJ&KBeXLyRNc<#Qd}Do5Akdz8wa zI+Me6CC5GotKUd%0-ho7!^(mo=JtXYkM4fJcz_ZsHhMKOE#dB;ZgMO{SZ6e5*+^HvBw=2b;>ovjn#seJo8qEoU| zcbfNY*yMpicT1e-V09w3eV^CF+ls99YJ+{z=DYj9E_wA;?DZ9oa*kF`yuw^`=NoyZ zJzQ~k^N*KJ+SmVe23l5`MxfvhtC_2J-p4+l*bu$T7YdI=5`{6@n{nJrMEv|QQqWx| zN%@BUX`A^k{(G7^2u6m$Mr>QEi*eD4;6_SF`>NMreM%*EcW$)<^WG#}eVT?hxf7h8 zN`-ZT`jL=m?91vht;A_-(yBVIP=*^R3MN7h#iwdzr-kb5Y4Go19@|*FkEV)8APpFC zR<7QkM2sUC%kv0msQ1f@nKd7$slv|FQaFt~+mKvU`Rn%;$nX(uTfcXF?~T<1SKQyR z9Y*)X>#;qz!4`KHKF!Ef<9DgWSodiJ9Hn_H=FQxm*Q5$;Med7Ei}Y9|e(O_#dek0{ zBxv2PI7r3>`|Kqc+@}8QEhzhNb3T6M6{CFRb<+~0h88;4VMAmH)WO9w?oP5xbqF+Vcq8 z*_+G#J5HN5FTTMazCwTOjJ}|@<>rsM?J}CP7d-x~@%%!!mXZBq|BJN0bKXhqNU3N; zzjX`W&l=f0#aXg$<8veUyw@RS(IqJONMHU!D)s&R1~bhR&`B_g*kX$+GHo}D&i8pP z&VqWBBpR!5bRF^JZgAiHuM|F!m#V2*-sXAkYU3FJgK%l>Vp`uaW2L$VH~r&3!C)>Q zr+#&P$@6Wcm7BbI%S|KNJn0We{e9luSnoD6#npw4#S!R+l=oiKX4htTyI3(rMQk5$ z)C%I-rKb@dq6|h)6J;Q0^TMa}!>w@d^b_j& zo5dwX_3MVHu|V$TQ`=3e_vB!trJm(=mntpGzsj?5&*#&kA|}_=(7f1Nke-myGSB)m z=WX6kNEQl&0xJ;?4uxb0R%+ zcA9m+93n4Y@8&sxp}uBtb<%-Ye35P&eLIx>w9@_h06I>`j#nY8(EKwcm)$joRl;3T z_y@lhg?G%=&c2=Jc=cVU3pqMjny5J9`B?=CNi@+t(I2*&y9&*^2O}6_n%~ z9nLlGO;g$*H#L0+qMoQHV;^b8$>$0PSq!vp#pP0eMsfe*yjFPj zL1eA)htH&QAe%17Z2mYN>m$WNGa0~#kwL%a`g?XpkGsS@Tn9GvOFu0Wf80LdIC8B7 zP13C2Hx~l?;i~`|;TI~9oCM3L>GBiP#B??bpwEs_Oh9*z$sTpjv6t@RV+*aXY&v!I z99l2x5<_2ehPpX@=IuEN>KRSxr&O+!f9~7E;^?)gAd`s7)05aCsu}V0c!Q$v?WgnG zi8k|f+C@c6n7Tuy@)ZAyo3?Ooya6s_HwPpO8e`28VCE6r;t(($o1Q4opl=$31VS zUn8?eunzVVNS5@%x>~9Zb9Ddh1f#8<)w^%+G>sEeK3qu_UCh#QVg2oymHC(EuCE@n z^UNM;`;e(TTY9gyuGu^$Jx$~F!7;}p-LZ4CNLCVK(I;ej*kT@G8}lVKMgpq0`_dMw zLFvFB^-|Rj_MP{fH|csc?Wjm@>0mB^mFz0#4RU1r6wl-h%wiqh;Qoz6=m zw4gd;c<^xBTK}p-9g9!tR=FgT&tN7?hd^I1YC*7yo~qH{DnsjO_e;vbI+wof+ZAGl zDt754D>Y|^H|M9(kzz6e#WXFi^{sewQxD!IYv}tH7>vYWkf9r02CrSH>>HofhkP>) zv#&lDAQZ^h?7DqJam43&pv%#=U1FY+Ye$^OP#A)W#%Fcp9tjcCGST|`78Gq!$?Xub zl2fK~WZ_<4JqMS|!pVC(Vq*pcfA_9xbjjNLf>&24-_l&x$s-?S5-@Kq1#M5v$5?)U zfn%h=QypTJDh4?wJx``r-}=wn3mgg9Jl?oZwMVlVJAzV4&GFY0bW-A0Je`!&x)M-- z9}@jD;l0OsuA&4zep|(Sg~6#>iJ#~lX_Yxfg9mtNyD`$%i(tQ>-P-;K9{%n1S}J0P z+2Jytm5sN`YITXrE~1uhYcx_H>t()H`8GIK7J0bMG+K2F={rS5JHO)ciKC<8*wm{s zK`UQx8}tl}sHjx6hpNU~*YR~HaxfDOe`Z+i4(JbicYAXs&=T^kfNv?J+j2D?WN@Pw zFfsS?-^NyK!pr3hd8_as9A51YFKL|VEMt<9xKAy3wObcJd+6WOCK>jFrZcR7ZidC} z-%tI^7>i=R&o#09JzP$BnsUh zJULt9a2eczjDA}d`N9hM;UFWiES%&4@B!)r%OLfG<0tYG!`}3{^)vv@y5MCO(~Uj{ z!7F@n(7jNi`GM69V*n_=IT-?YGpGBM(T)}p9S?Hkqq3tV=_Ob`%Bl!h7D`_9CP6$4 
z1yTc(WC#^qR6!VuQS;m#i29e3OITR9-~>I2BAIX+L`)L|YjzZ2NOTuL8~yek%hro~ z>QRm>P|e-CpX7({Cphb@pjD>e`OBF&Syt*zAE_q00?$fj76YHGmmm2SK`khv&pgp* z3o;B!&M!8}qP}oc;*@Xm^aE6Tz($2fjT^uc^qSiG7av639YC%INq#^!S;Fhfjv*@6 z$B^gCo~iE#rQ4DRf@ZG4X*;z-atTPH8DCEx^CXN!JN(7H0 zi5)!w+e_w#J()e;JfqxHrD{%LC}^pTM4K~H3!q2Ee2P7y4s<>m=m_{Up#wM3fG)Da zpX;R4p{i_`6++;Kwaww!UPFNN!J>-fTs$OJyOx{1?w+q}nszuY3}Hfi7NvCQpCq4Q zUDeO}>Qh^@d+I1BbvSsmSs~w;)jhd=sGfo}eZ;*#)1a^s=WtyI%H$T__d!k8M6Al& zwImQ{M`@b)J?~Wa$44}tv?i6&F2GY|4CeblG=zQsQZEKqtJBw1=wGDV+2`8fkQ)^)9kV_+Vc zzgr8>Og|@IkE%!()cdY{ty|5>Xj8Pg65Nl@dMoS@igO?4hT~@&{{t>?rz}kntE^XS zns~_Y=^|BDz!T_c6X(+7aq>XR=;uA`W@*Db?kDmzW|RoJ1& z06fr|C$2Fu@x;N3SOjYA9cSLh#`OVHP@giDv;C=Kq$fk<>r+Yg7F!cOhmWI0q3{nu zeQg`>#ZvKAU6iG&?Ju8F1m|fV{uzn0#15|%D;AGmWea0mIaIyKR}|qnm;9c|S&4%o zslVfCVzdeFg1^htIN(s_6ON+08;>Gs1;;W%w$#!+BtL1?W-BW!y%UIBfA1k+<(+8A!(&Vvj}byRR=oB>>Hw~BhJx!w!9 z2ltevoR$;Qa(4W#UO^;Bm~X{RqK=#z5WeG!{r>XzbTpsz)E4TiOINxDMi;#ova>xW zeiM2%%h@4hT5Z7@IN#2vp2Wd`o5oFM)c+NbHFAX>2)MZP#~3Zq2##2PAGj@+2L3}T zd?LJ2qR6={n&}?dyFNdyzTMc~PjGNiy5aT;H%Hdoh=SnQ{Q!Z;wy?4Cl~&#n&$|Ek z3I2OklksBtUn+v8Tj-5`!Q4XKnpo3XY9pqb)gkou$zUd!*XF$Y2g4!h8)^h)<_;6_ zR3P9ZLW7H$i&2gW?N)h<}1) z&>T=r25{$c5dn9Ba7{Z{5I|IEi;?=960GW!Q>7ra4^~<`MuT#HgW_un|AN;88S*Ja zJRwkwSVCw?roVi=fDv?5H^oD-2m|EG6M(kcDXoqmH4y7I6BmhJk>D4(^DRyY8n2s& zY>eB&aTd>Sf$ErWnJ0vMfvH2=Kt^GUEI5)JVZhw147vfmVxKLE1zZ9m@q&C+e;#!^ zs{-T(1GJ@U_LyjpEU$g?T)tBU4e>I;1|91j;vLJl0DL906&n6C_5><4kvE;XDc) zK)){QDGtZAO@U+zVVUQhF?)jyZSc_Q`}^EGGXj|v-b<`;`)90{w*NMvuYBmGR=!!4;K`s0bfz0Kjia0htZYBZ>0jo!0+KP1zVkL+L?Vq-cT=a z2DXR)6oSepQ&xUT+foqsG$!P({V9^gEzKZXGjMN!bj{I$w|l^5u-;KR;KNiER72c7 zG7DtS+Wl^g@YfdSm;M((!7fYvu+9$QlTMyUkGXk9LF-fWXO`g3e6yY=jdnY# zCmM9{|HZle*FA4XegwbUUhJmzP3&imrh{YIo-Crz7q%}pXxh$rInOn%{uTW7;?-T} zy&tx%m+W4`j?8P~%dXBAw4^XHKaK@Dxa+h;^gfDkS=2t7R6z`oy(-TY39GF@6KE3k z12+N92zQt~j3Kxy5MY~?2UA*RJD@nP=oonQ7!OzjY_5;$#S?iv=?&aVmOI3%!IUJL zj1iCU3=PS_uJ#0F35>}k=u>8DlnSJHAC}dPL*dPM_fw^X6S=Fn*n)EtVKJJ@!L7D< zg^qZGJfFtIN1xcjESDeD%xoso;nSvNS8V~g;;?JXBdmh*@ejL&0nAUbdv?-=T!?)_ zO=JPKtPkdh8r@19%>rRBX3b|6bGoR=r8VAvnHplI{!PF}gk`~%zEcttE#hG!(8u~R z%k`UEjckA9%>9K|IhN7Be6n^w5||(Obey^$)I6>gv>>n9XNKV_gl`h>aDyzzBSW@x z^mv@s3H*p)X-nM<5XMY*x*lh$^WKj=k1|2&TL9FjK3-M8Fam0Ewvn_L{}Y9>a2^D0 zZ)>E#e-yowSSrD3VPPQ9f~!s}2aN`DFgT!d9ae<`fn5XYx&}j&0^!c@FBhF7rGcox zyc?P>(rBd@ne;C}13?-|6>dY_@0!vWJQDz0m6pD)o?A$%>E-~G$fbi!#cF?z#=X=AyFH zfE?Y}Y3w-(ALqY$d(yjC$>^~Gu6B>vbsZ>vSD$?GrNxJ@5A~!rzHv$ba>H%VkkX$Iq#1PC>n0 zLI(w@(cf@)WO-GSW|kqMAJGC1L&D0++303Yk}7^tZBQKwp0C-nhVsHSY5({ho-0NH zewndL-l@VwHNckTbC{p*jB;B1s~mr5>OV2p}XWQl?g7r|cxD~P%W*L$Z&aTpz$ z{#E?*0ei!45D0=#Tf&vT9yCkDnPX6?)ZUD#G+};O(~rnwRM4!D^irnFsL?yhK?-}5 zT~m5r8jLYe8hs z0#9XPkRN30B#j?v+*Zv-zjr5kKM{z#jRg-t&$jzVEI z(FTt&+7=0p(|Ap1{q1D-v9FEhgRc6?~eh=dPx3{;5tgex)Q=1K=%L343FSgU1W$W2ta@G==fC!d*%4q^=(35|-8 z2zF$zVF@B#!*6AIyJQWS#;ZD;>{k{0BEt-TLG&E($U$5YpJ8ft+@nt|g;;_6I3c?x zVk6bRVtnQU={VkeJxK)tF~~BM(is2dcWcd00QIntsiU;BB%a>7$emgjACHY!NvW9ED9bEc(6@P2 zB4R6AubYT(IF9|Bot@%_N;yk1cewyl$H-9-O~B9MK7;B#DY4zu*-w&;7x>+Ej7J`y z*9i4ZYgLVT3NAfxH@mD70AE-Thpd*(@|D3}<-6uVY;pKm$epJCPm)RQ!_ahIMi;ej zkDM2eH1$^>uL!};+x%1*Y!Yk!ovxi7-T?DYoblYe(M^3Y5b;WCtLU>gOcRj zsE>s#@fXJeqJ2VNw;{`F%@=6o70O)7R3pDekMZ|`{=IZ`bF9yv5rw{U<5{Ba)x6bn zFZAzcuVyRYa6|)*{RE-0|G{1z*y(eld+$17y0o+E&laR&n&C;!rdSSp_3QO>?`gLs z4&LMn|`gZyv{hdrX%)jO!PSug7(BA&@_} zPPhWsuZ_5BEU`jK#*Udy`-gpe?VDa7-$mc6zo-v-H@J>H_MY+@7OI78=@uJPt#H%5(t{IOW8nM;(fsFKE9PNR)_2~RX5jLZ zid4LXRp#oT{@}OfuFi}7T>F06UXPhQD#Q+T&%ICi93BEs;whi zo_=e zt2;(l@N_yVb9$abs?N0Mj$my1iJ6c|Qe{B?1!FBu3r;|rFH+TCn9SA@>{61;PuKE+ 
zn75egEjY$^5M)vN=3_lK;luW$EO<;>pP=)5!)SKovuK^;AJ?~3lT8kES&FKR_Ab|( znCKNI222G(Lz(ph+7Om(Z0`f9AKgV1;DVD4->I7lmD3)ZUi+$(dsRk%tmk_LDoC!! zL9<;}y{0PyQ^%b3^U?t~(WSa~GyC3f`-2572H7vKRnQA0jd>>i;y0hy9QC;z`QC9;7X$DP_2csiGO@c$SF^Q} zp6dlp2gzU8VQm39vCl0}sLXa+|BacKEv6B7XgW=+c0_w@FFyhETQ|3k2AGLDPA%?K zOaFgyx4)+mr<&xKlO@U~VFv}_!e=T6kSw|9(DS^R(E5)}Bcvt>r4~qpZGbYK#*U%y zz@-orh*8X&Dh5=*UXL_W>+OAF{KAY!v63hZlz|jJ20;p}^C2&nIS%L_4#0_2EXRRc z>@h+~Jpdt|XTZ5OP!33urzE9s1)u9lJs6?PC3i_^1sViaI|7#fPK*GIU_8{c_sAm1 z>`I3e=aM+k`h*YC*qG$})yGP0xZ*`ruN%-*tjmtxnM zK-!L9f{BS#;s*c}?JTG!z$G%yw9I%w8s8v+=U0g@+k+9MvgS$|i*P|r8%GM4uNnF^+~nSJeRI?HTw6czP5?8Mrf5M#*Q@f`07&RIaIqm<_jo^Hdy$F?mx zV4rW80BZ{%wBF?m%nnQwcHP}druZYM=?r*w`rN>-x{LU1#66k2KOz$A$gdb?ktAqM8>X(lKhB*e zsG{=RxBV;UgAr2VJ&P+zFDMaKUU+#O%%K&#B6x=a+X5ezVy@%KJ5SMg_NfGIACHE3 z-2wm2x6!~K6mkki3tvt;;J#qycCpGs_-($GSbh!X;n%-}?w`)KVfx=y&Cq?};Z1UL z_0ikPOFWzwtpib_YnZWTqMq*92#3gVv-vuvhbqwxC2bh=VIo!7cU_wwt7haahq$Mp zD^X_T*t(E23)v5UR0HDK4?1>(@=yM4i(GYE&We8i`%za7*rEHwCj05tp^DfoRNil+ zeO)jy34Mr%aq|RFEA-VNy%V$P1e=fsD+1u$Mj6>1E?}^Y@`plE?-@}8wJZMNlf+D# zKZF=vWr21^2eXxtwca7*%prcZ;u?>m+KTz})ses7mU;CD$YNB{!s-m1agV!vFH)Db zc$2E>f0t77G14){b)JY9({noO-cym}V0hPsv9~)+HOkd#kGEXMJLCfAD^h+X`cuBa zZ%xS?@R74m9*H_3KII0h=f(W!DpF1Z&5^H!ReCCMM=Sb}ylbe>=BYAyti`4)^O{b_ zOw!OS#+<_mdI-r2Au3I*I8H5@-_N$L-Zyz&HC;cQ5ck z+J0{Y*0uZMQ0|;r0j@ffZc6wyian)rt+$&i{A>6{a))!5<1Wv8{(EBzb0aS;(0%QW z%24~Y^lm|eX@Ntwr`8*TKJv|#=}Cc<`_eM!8m18SWqaG(b()mTf3yH4dsicQvtya4 zZ>C6JzKu_)wSv^34fDD5eHL5K<3DG?_=4w`UeVx^o#Ia~zN$J%n6^{uaFN||f%5Yd zN7!UX{t{M!Ly2D(yuIt+%PEe=6dN8! zRmICE={rdwW%EL)lpl9EqFw}HTdGA>eDNKXB3W@vRB zt>JaGi;I5lCR-)iV|5Y>T)iIk4mY)Fxg7|=Xa5fVPpJ5RuZ|}WsyZCPCAKOx_Nb-Y ztFJzPkLemLlz=5+SF4;E>E$!&3#X{^*5lSV;cw(?KHR?7ufovnicp^qQ-Q$MWHI!p zUTyki03HZGVhN%SsP?(Jg9jLdICy~O0Vxo*<-8@}JDAr9kS^jA!U!Od^<9GlqU1wV z8%L_Y2;u=f6~#Aq@6~n)uHc0Qj7I|uXmovKWVR4QJQq=77 z15yALfaqu~N431%8i6KAa2xQX!ed!(nekOtOF1yAn*u$&Ne?lqA7kgaVxdcq1s4N6qU@=gOc&4l$D#qUrbj<~IxfeSU)c1CMqzBug2` z!?S)_lFG%tY4HZ#rNPivm;5NIy+Gta+41saS76=%wDmkD`4ag8tYJkbxc|)Nu()nO zLFHgf(tEeHw)RJ0l(Kd$O=rWb%8_2pKVl#cW6)9;C`I{n$IC0IJrz|T2{ZvM5?`1>y}KB0ABO z^N6<9czngL^3aNQ)#=1+W3pQDq2ga(@BADB%wta>$`429Z1Nfk_Ikmka)wt{<@zVT zYxp{-eR=A+Bcl!0D}9W>1z0nv5S7~TEBf44qj|A_o2Hi*3-BbL&TRz^w$BWo*Pli| z^U-Vq&GvEydB_UtxKZlB1sOV+j}Gp|emFS5^;8$!jhVR}($Q zo89ilLiK0dx$~$mIV6<*QcXJ7iS1qCKZ2Itixh=kr|Q^17!O%JX17*@YsWZSa6kB_ zcPlWxV#ePjIblqtiNwMr-ae2|2rg&ax5_bVtgL6SmSLN0ds9gY&)YHlx?}G^7dmR@ zshpO5Ko<;L`Y^#|ju9O(^;tUL0Hx zF0^ZzSkr!DjXeTN1v0WXurXJf3Ov! 
zxIV}t;dK!E`oJjsB=i^X2O=ji|xjr5T9d0J~-IKv6w_og0$A+MuY{qsE z#omD+D@Pq3^YzZ#!FIRyv!a3ikfpPm!_Su)A82$qMLVOq3{!bEz9-!Oy_L5uVEP~W zVS)w4W1m@adyuziviJFRqQtg!ThI0$o%(6H%iM?mn}Qgc(FWaMO2ExDKqOD z&ll!VHuE^*_pKlB`L4HAEfI3~TxtTLW%miqa~}vGerW4HV_BnA6ZOD5f%%>^c6&zM z%YuDCIAuI01eAy5o^;?~E<3gP65u&Z8e$Y~;NQcL%AUM$QFwQ`>MUtN8sG$I)P7EK z`r1Yz_^Kc*0!}Od^IOZuKN2-g24Tr{)#o3X<1K=~2}xBE9WX0064;Y{P-;Xa%xvl- ztRFF}D%h3y(&5MdA?rN@;e6LF-Wj7MI?5aOM50EG(URyyFQfNP5M@XN zLG<1udMD9q7&YqX49@Jc-~Zn4d(Nk4zRri|ey;mkYyH+1AOtV2L>zzUW{K*}{HrrU z#0(y*N*HjCi_X!mc5eyrmm-J1sDh2blhl@ouoe{1p8aKx9?fdFP)`Wer}4Rn=q9=u z`@mcqNt_l<2{1XpF$H#f5LU$F?zJf+oBy5N%4;4Q)wY!}a&hm@Tnj1|l)TC0(Qk7K zL-N|6nq7Q@#h!$)%=$T|wSomMSmMa$ZSaJfEaY&Xz(R|ilfvTq6ia+*KtlB45BfT`nBib;WVztDiY;A33DN zA@9_d6C+QovCxhK$MD%mG zeaVsShKM5*D6@CfW7IXCd>qMEdR9a*=J&=n$m1`#mChG*!!8t0(*)!`IufLIv+_u2K3+S{`iq6y&NcK zN`5yDwwemlV(nq7TrOK*a2??ZINVns$hC4EEY_7i(@rwGST^GcxcfOsfDvf_JH#*$ zkyAHTsfhd3p8Ov_MY}VJ3|M&gL?}hJXa9`h;IIB$r=j%9^|7C%LTM)cR+5-p`qW39 z9~Ew)5fq?FI4G^Ba%ojk?jV$SN|%5#IC3_T&#su9h&?ExcbzHDFeRC7vKfVV-}E>~y6#o9!H{1UxAXUO_s*gSkbMI=%-i(Z!e*2;h=ZpnX*>+CUk?5{ z|AAL{UH1nxb7N>r_#yAB$eUQBrA^636-uMN9nnnI7_Cws{K+!9i7>KET~siH7S3M5 zIzOJbpqW~ub~Q)WKA4AUUp1||)R5V=5pEdOO|@V+=#xx-@o{a!krQ~@R*q zJR@@an@j}6PL0BB-m11s6iCJRSo};99Suona1nHhyeiQd-GvFeH@yV&9>A>_l z+PBkhl%95ia?O0sDw4A=&j+ zd#T%I40>=-_I%;h$MV-kXE~Jtm(kI31|B+K4ZuSn5@7=1sTk8VXBEUz{kR7^`356% zh|13LN>O5N1R|84xB_`8G69htPoWQU9~#`-bp}m0hU?(q60q+uM1s=qZz);gu)nStlY>%1@1N9HEpt=RD#OKwa0KbIR z{j)t)zZ@}5xYEm-M}M+qRbXF*MbJ!bcuFG!w!R3H-80i^iY`PWV%z({#>^p!6gN22 z6M0h@6Q@k<|A!s)LVw6keuCD%W9&}7Vp2^CprfwTKg*J zT|?fPMqV@|0M}0?-i|2$%g{lVOCWVjb)1Bm%QYhsSB>xaX!)pagUqdW>c*pX6N5Vg zT1nj9EmJXgLVFQ<=1TkICC%sjQGS^tVY%sncT9~V7-5^O-v)i@DAhK*+pD<8HkbUf zr-ZQX<#WG3P$sCOR91Lj33{?4J zVjdQP4{UVxj~u=ih_wlEJs>O22NOuP&0|;V=xi3j2#@aH5Rd+6T_ug*6d>$+56=4) z|7wT9*?XmI)38P8Bwf71zIqeXqfspD%BvdAh07G5x>C;F^Fe^YVujF2P-v7CUt>nY}A#2A;i*WSKy5b3sILxj%lU9nI)FR4L=kDsktLqrj zb-UB8?&ph@;k@0<7$9a-4rl+Las9xQ8n(fz}G|o%@ua(HLKb{q2fP zSt*zOvD$_!YGjRQJ1lmc4-*kLqjvk^LyKUbWX@lyWlO5{Un$gfm#|Yu81Pgi&EJ{F=F3MKQr-l=&Y0`8AWj?Fa&z&pAvUZBH>|6|Iy%+X84UW_Dcq%&6ri9@Uy_JId=7N?5Mw_@7rX!h7IwW=qQ@ZalfXAjk6O7TTzV-ONMuJU@EP3M&9#&9QNYV1>yo0m& z0W)6(KsC|rPSb$IJ#qY9AJW4;oFp{M9Ygi?v#GxUmbhln)jW{DwZ=)Wf6wcC>QtRv zan``O3C@a)tR>ENW#(Lmoj78e3I}hQTRE@ziZ`c`A1V`+^r7!y)DPo{ioTN_ScVEo z;cu`%#);gX|BRPS*B&Z3P;a23nP$_cd|;<2o+do|b*PTe=JcYIWIXL?D>~qa;7l*C z!y>W~C#4WBy)hd^+V@sIM9A>4?~B@&><3HvVdZke?}Iq6^kks9#i-tWPvYZ9V!)Y< zgUYqtn;(kGwHz&MZg10;X1mWryGxt?`n(u=Whs1h7@()gN`}r^6X)kYf}Pc0e3qg& zQyE+P;xqs8EY0O5D-N4d`4N;j2xv&;&@*Kq}kSr2fmL>d({q!_X2^7(p zamKkX3)ApycjF{=w{x^jg_h~c=l*qED@7pa+;6=$Kek^)w?C5ev2>WNuqNORTRfjc zpP%~{9FReI)<;C!byd#XsNfykdujjN|Nr&0VGmn$y3(~eue?duHIjcWy;t)}66F=1 zI#||FB_n0061%=`NdI$k=Vx#bYohK4`!;z?g)Vs~g(UJqc9%Y3<3=78jLnze3wlE^ z-&B-jb#Y3uam2$_nB>dYN?H|P-X3GeSN&lTvt22xFC}9=pEL3-(Uh=h_RDt<3!Znc zpc}nU);8L{T%T~a>x#W*+@|E7XJxIY(BHsk=n_UpB-*be!hXL~2P<&pQ&#dWM(WnG zc_dKHp+YQI4cw5kkLC5zTnB@heOBPO9YqTt@5(=6Z8G5;O4Ammjg1#rV?E6|im2zg z!zg&T5|185-RzdB(HFnfle4?A=>24Q4)QtX!c0)7q*x$YMseQlXB?k+h`?5_EPSd^ zM*emf*0pq-7C_$|^(seMgFOv{>GBUnCuER^q(3)PRdta>&l2`5Cq3Y1`BGj=8IvA9 z^ONf_UQ+_7Li(m(Y&KC-7x`(CK{9WqeFMG7B~Ul6=@60V(O0>i=qqv0c1b%UE(T3> z`&`ezPsQ&2?HCa0v`|!eoXj!b@3X_jru)$c&X4xK!wmJ#E*H=?`kVGVvQbhk&digN z35x6I;r=SrWJBb??V_bDPMd`=)zMY#M8PT|OUHI^PXx(Q=Qo5)P^j{g=dd0y%jCy4 ziuFEaEws?VKqfj}({)x3(nV(WR17VbcH56<7Nnf09LLQP!gp!r|7?0!MGI~1z1|Kv zj>C*{iO~UW4nckwkQIrZP6s=bPgp!dFypB4z`KQwMByzyzOdw9zAGg;p+^1lmrt_r zyhaP8+{H8xmR(N2u200@SzT@-1^BO%G zV-auKlbYs!{&Q>k_fxtJpQhaKwdUpOUjpJ2BB47aNjvn9`Q4TBhSeyS^F@!euYCGx 
zilmTyW8Xj5#ih?m2eJzQez>J-1@uh0JVDR(_;JSAbWs$s5f)GbC|v~$TaOKNKqiNc z?-oGXS}sdll~{)ugThx#@qS6+V3i^#0%P0?HpJ}FH+!rb3&#+MiW5J9Jbv(Pz{lMK zj0BSD-p83sB6<4G7-|bs`Td1UqK6&7f)WwY^;sy*r(T94_sbeX>jC|Z?N4Vy0xjZP z`bX*ngpv>tB@P#`5TFc|S*4fV9A`Mex5LYsn{4wo;px>NSrMTX>jSjpOT)hqNjFlF z3~q*4IW-LsDLBAT+NF5UEfU$;IF7F1XE7(>cK&| zVk%1PZ8$H=Ewd@j#VBL(i1)DjgoP8JlZr9uq*08P3lUmBc%kGdIKaNZVKh*Io zPdd%!vf}8BU}^ULo5-cW<+(f?7k#F(UBY*Z!ihAbCqVvDFogX40#9~K0I3u)q#P>c zv5A$&x}hQbw~>~%%-g>dnAwraiKP>1(tDSwkQwM5&5NPuu2~Ba?m1%1RZ&ppY88@7xKW;H5?-k`NQNDMP~ppC<*_7o4r8CDZx%~ z6DiT1%!}gs!4aPY)1$nL5|-uOpG02dj8GJ9^=l&uCB>^Ev}4muVodhGlAtHjFhR(CER7YyR~c~^DvJ-Qxcc7h50=+_ zbCmr4+~8zWv`)kyE@JK6PQrt-QRm?CDq`%%=aOX5Py}HHj_t@CAN%|3O8`EkmTwF3 zaWaHo6MiJLi&NHln^oSYy#6?6a#|}VDl1m&r~XAPkvN?3%Hma(wep9i$BtXZBMKu; z6b_y-ndOzq&t-Z{_6jUM^6msH!q8fomMνcg#xnCP-EONAq4kxMeIOfIvBnpp7X ziHB8RhgH#fw|(bZinp1#QjJVT$JCIdOPL^AU)!o3d)YvivF{5akv_ZZur-NY(qQvn z-b3qG6_K$@=y#G}7E$Xfg`I(SVR5*r4Da=shscJGOXF0y(QnZg5VT2G`;gEr@c7%z zP(xYgQq$qH2mn$KDxbVta6Zujl&h~Ob@5*($>7QZL_Ti)Y z;kM?XzkXFMwxjStl(vcFip`ih7OiOW|7#Pwvt225Y_G<+{TKzMFhlBm4?NoS@O9M& z>b(5Us*wU7OI|jHRld5CTiA=0^zH5_pLql>Fdx?dXBa(mf8{AF(^xu8kJSVc4^Btg8e0yj4V`U&Rdc=W%B*V*kef~rU3(U1e; zi#>p_K);Sr+^0Gw zN~ojIo^TF404`%vA~9k|o7d|D%*Qs9l(GT;1ZO<0Rdk=o{-`WIFm^l}M zl03RpLGFb3_w$u$PejR_d`ERUU>EmdlyQ15$b^Z5t3xDVUfHq7N*hFe|KzLw-pn(5 z{Wkx1)J1pI>Boz+Ko04iK#ZN~{KE$w&ig_5ofABOhqGZraa~Tppul{n4M1x4v}94n ze|*!olRdWWCJuwEG^*xElgaO!Fh~bzS?bSThDaM zDbc50=xKb1^w@3V6+y;GBl#Drf|)>XI0EB3cD8iE)mDG$&NZzFJYPrn506lIQ#yW z@$CzAU7Rk1ktaf<%>m9aiwCGxGsE%y{mVR1`{1+K(@HC>u8I~mZ3CaL`#j@Kv8njN z`_m=!QPlSz|9AmPdxi*RGqeUKTfk|n*KYlqAF8#r`d-Gn^M7R&=3E-*LcjT*Aj75V zjJ|g)FIPLTFp?;=bZv=h$oY1hC*4C4sljq}Mt0k=th@A?wvC63GuT(%2=YrpdU}mp z9}oYF-|Y?6WQBdfLk6!{b;w}2Zj`ztTJ;5lM=0t2q50J5^RF5y|Mp@Ul30xp-wscgUIW6?B-?m{>pQ|o&k z#2Z6(l!KE45vJR3(#@4BEFQz1FarvEuU{d^0Ln#durbq0ipm{|l$W z_1o`I@<6(z$q$H}xF>*OU?woiOC~S-6Co8;zk~Y)$&33KMb_djWDY=caWK2+s@mub zksX|#n$La!dT%y%kgnFjb2v3J^OKw45n)$#1t1q!e7=&X=2bUA5y1mICmgX>b%)VH z8A8^pnpfBo!$;p!8}UPB5b!k|adsMDJ`(?C3QMZJ*!EhfsjC5*7rbIE5`NG9!$QtT z$tf{%7x28fmLdm{3-yq~xoGaoK!}n5a)p3uyqOR0HQ=V>^a78ss-qbZ_ZE~96SPbQ z#sGs7R%L&phk*Ukwh!}J0QFOld3|tNTi{v8{6hD^1vT_rbWUub2Errv39I3CP98xU zFoK}r7xgN+`A03=WM-GbW;KgBC}LH8PFSzkqu3y9%+i?V8K9|m|6!eic`@~RSeH$N zH;q1EEF#3R+DE_HH;X?aj)oC1cU9T2B8g@<0wNMO5d-(qZLEN6RVx4uah4XF#9)?B za;b-PjpeHh*9M33Es77G*MqqO|FM!wk26XP#2+J%{?vP;sWGZeg zp7N*17&Q06wbQKCa7&dhr^QT+{fW?z0MpWv0C$td69TNvfSk5`{|`B5r7=Hn9(f^u zdG=Fcl0>7Yi$wbS3o@4`?RYr?rvkUZ&tSQsQZRZUct_zle#>zJ!|Tbdz~{3b1*Btp)BLMljElpaP)0PA0tRm$#(CEM_cna=1J7X!BD2+)Hjt6vzFKJq`Q#>mGZ$UpNY`4AdeDBH7sOsJ>q z_^3R>Ym->cCN>W2j4*~R6%M2r^YL-fR~vtI*qVJ%*4t^`yLExk{p99gjSZHIs2v43)vp zQz_F_@hZ#e@y0{GD72Bt&F!R_P(a*mpcoUXWp<^~x8a>rtD;r7{-{n`U|(Pl5wDS3Rz;HIDTKAJ~FSEDGC4D&p+ z^~x^=B|9@A=r_4HOf`9Q+3)=7I<80Q@w(Z@w*ys?AJ6Sj>~h9yNTEC1#?|Dz7i~w5 z$Ag-D)~Rn4!&6!do(q6sC$Bp7?uZWm5*-fwU3zI0~L1T7m#`_VnM2U=D$kw3YMub|{fKz0k?{k}(N_y~F@g7}kH1ob(7qs-ldF5JoOjOX*8QL8tb4Edr z*GF9X0mb};qED3z4d4O(K9YNIZ_gJ%~ZvDi281BF+~vN z(K!nk2RL;R(3JzXiKa!o#ASuoexaWEn**86h^zUwle9Pj88yM!HMU5vCiy26`Ieci z@FHjc-j0I^pYR@@3rM@x^yMI1d@e5*7ARbZ!2xEwn#`-4XQvWwXf z*J$H#En>uyr%PIOC~&RfsXPSz+PA_0{-;lYn!%aTdt~u^qwCjX)5mUKo$NRg<0ZDj zqfcPpP_lG5%`gCCx66V&la@ne+F9)v74$Ci+j8b5E)ALa_YhQlrZF5o^0wTHI&^Km z0q%O~ypUb}@O&!>k|@x>&B9V^+l^1dxmBenX2jSVWAGa|W@Mb_3dv}0<7;rB_~Bf~ z_fyM7lkwZ9*Y#Ab%wkfHHmWvFu$c@asIwexUf<605`0b7U`@D?pG)9xgeT1qI$dws z$~VYl0cNXYH@KD>6DvTL0r{HcaeKKAPxEb%k@D%~4l|tbzm8?`y>cAwN?aTCSUOo3 zxHbCC{NHsNHiIVqlW*Gt%!cTfNTf;s@br-SS-De@? 
z6_(zQ&nDbz(><~ePU-V9CgfYyIeH)xd9hf%x>{P0#bAG85AGA{3R_)pfa9)4`>(rN zSP8en7jz<1MI%Jl9V{RWdf1^f(th3EMb3Hk#~L_p^!dHH!QmIE2cprZE!^=5Mp3V@ z&RIRU7^$=ScMO&Zvih3Zuy$*lf-f27tK|S6;BB%@B4VTS()wU3+;odY?v8%htI3$m zI(4$LPMHS&G9LMM%nJ#OQ3c z-3kAc4fuxr=1}r7Zu3&OedD_C!!B2EId!96ufb)dnnPoM*L9qgG73)ANRCauOMI|I zao>~=tO|Ugr*E1%u4B-8>t|wt5GXsN&gTL1zVXbq7`9abdiHzIBo0!y`9iI|K zf?c5lvA9x>(Y7|)cUCpJ*)1kwB5nD8Ap-ur<-^LC{PWp0KFgJ*sIx}nK}^1uc$!3} z;vrEZ$Fj)gi4mWoN9r$B;8n@wM%qX8%mAxp#;Dztc!tT1)+LXBLrKZKJ%z>Fs&mpRF)Uf|(@#gk|e~;c)^7Eh|CyeZfzG z4}jQ?0swOdu`QM{ULmbFif7fhDGpCX=hNGNF4PA9ZwpSacg+V>Ig<6-_1B&drM z2|{gda3lquvX%m?|2pjK(-E>3c0csC@qred^BZ$`#XmpA8e|uClAp(f%N;U__#o}K zYxf298tttv$jGN~5H{P|nT=qX5}nFQVVoENh3)uKjP^B+#m+e2qHstLF=3r~V2iDW z7cRkSwm908)Lvs*l7KgfaTBR zvY&dd9xv-Ss*)qg<`$n9Zp1cPYxqvG$Klbq#><|Dr&zItLl)}^31iA(LBwP}0F(fK z!>HrT*s3~{mR9|HALAO=r{=i!*<_y04r8Azt+J?O6Ax{wgr;Q^cCk{&$s*M3Z}zWdYPqxp_u7XQDrkVE5N z9fHlfFRhu~oGw|At;Iw1m0H78af<@tWer_k#Uzo7ai18EvS*%mGYTg!C}({-`t;>@ zf-4HoLSHhr;iUsR^T?O2E~`Efaoi$f&a*Uvh~U5DapC|s1`o~jDP718uSDd0*W#+r zQE7vKt5T)l=M)1|7E5Zfs8A*bp#TbS)E-xCgt4w24}MyELQV8P^*;9oEj-F_ zJJ-Q~=0QG<5=|F{$lqvUw8kENbS&els~7d>ROhH4r;7f<<--J-j0a0pUioxJBZ)_L z4k&bUmKtLT$tA#V#!22?GD1AT82932xS+=%QY|{T!5XfZlv)gFg0JYxeHL|z3|wcX z*8-0p-35@s~a<-!~FCt`+!bGi`RC_0a(o-9Pw`z{^$nnL)= zN@t*Ha^rlI)I-6sGLCR6FHxJ6FMZeBAnQ)e;;{bgnLNGYhxC%+jAn721c~S5hbTyf z2Lsui2+|J6f-;%}oEo5gV3!Q8Xgc0d4sV@1H+X#(>q7*W&L0UuDX#}n-eAGwQ+pnd)gGF6Ya&# zjhU&ru^G4M=BT#Il*(U7-)qz&D7+Zb#)?*Iv}7(E7;y|Z=w1&nx!!h}7ura9t~Jan zE2}uL=LVw(B{X-AK3T*R{Z~kwm)4xQvyGs0j)tI>q3e>0IMW>}vX~|T!oSPh!l7SlA zMWq2_RE2m68#s*E6|Ox%n3W-c=#X@wo(@oh*N4-O#sZB3YVZz03qbzB0U~dZ9d6j> zh+QsHO-Gfz`1yKbSceHNrG3d1-UD1p9M9tjf)ys{Xf;3%4cy~@G~;|fgCV)fd|BGr-*nKS+WBDcs1GR+;K$A@Yd-P+t2BXYDrbH>lmWXtd~_5 z^33XyF>isLRzErw@+58Y65{&$dknpzg{<)aRi8wcHsQYW9?t~(u$bHCD)!eVTMr+u zY0m#L`)*tRq-P5)$#j%_opYc4Gal=j_<8JN%NyF`4t6!Y)@Ccs8s|=jqqad0Vx}p$ zE4MolrAex|d%!GP@}Sq|o%7`kr#!JyrHihl8v#U3!kzom%e=@@v|uQG#OyOdh_Zlkx1?CGpy?6ytrE_jCzpCA|fn4IhM*u_O{#4 z`&26vlHA-!)b-24ZOwi3?nJQ8W7b%S>!_{e(KF8+PPvy!#;`c9j~@>r{I*s06~wO& zOEGEo-D8bB&wT8PMUJ-KP;d_h>trMop~fRDd`0@5t5=i1h;M&mwrb?!eT$ug#nE8# zWQhwTsXGI8V~n#+>SOo5cXJVM(JvcI{ohSCI{czyM>58+jNd~h)*s1v;WW-t>*ZC6 z_>^wD@ULxv(VkLQe}Y(-nB)Y+;oGB(P)18|)vrY4cB<@2=t_^jo3-ZK;^YB zYLJ%+u^MeeTWQ0sLS)6M^UYx0^)b6b7qg8wE^=$d=^ZNHU84pc?iWz#2a)KcA?VqlUlOY^w?$TnGF6j=1k1Kl2W}hr<4aU)5&` zQ^r@1M`_FBzL?Jj8>i|tiu7~^)F3A}edT7eR2*Eh1AI@ZXg~ffyR>Wk1Jg=jQ@2T# z4-QXpPA>Sky;gr0^@fb!E5GH>)iFNiJGOIVZ$~SDs_x2Zs}lMGWSw5B%$z_qrO zBG*A9Qm-4)9Sf}2tKInI#brc^_p`bDxP&q2VnH18I{V|{NHazeLS@Pie6C`b^e1JMAG%oSqgk3WQ#p-Ii@ zZ1L%o7(oj-Z9r>)NLrLK0tPz(E*tw#%RwI`U^~WVfU|(VOL$c~%|X7L2)Sm9e%uQ@ z)DA;Ou`Iw`j{_}uNNVu6&7Jb3r1`ymx8UpMVqH$q3&<>kd5+DVh4MQb4jd>j2w`Vl zK%}ZE2i0#97GEie(jzkUq*g!*}16uL$qHl+b9}_aR4d&Px##_$P-M$|ZT!FOW z48?*BvG-|m1A_`tNWAiCi!xI*4`B#qlGKHWqUT3%jYQ}$mr^qZaqv220IqbIUwKS& zF2MS2%KT5iZhVO$Gqf&LV5KeE-dae=>UV{~!e@!mhw*rn=!V|3_q9#yY=w9IxVz#I z+B?6#ppM)k!Z*9SR)6jp3VFPo?oL(C^GQV~&|Jz~v;Q8izqFG_*!RCn3^i_hC8MtA z-@B>1GH~Cur?)U>y!s02FbLO}A$(i9|KuQ6S=P0`+wl@{i%w@d-$rjp=DlxT@3)o@ z8QwM35A9e5pwqjnFeAc9-*Ob=bo@K~t4e?xm(=NT$g||Zk`Zqo$-7ZG9dErMKH0xW zhOP?xV*d{o@!- zkeb%DhsG5dSVX;WVV3-AtYZ4!chyraI&+rynYjy~QL5SU)Vhn;>)y?P4+>!kZDRZ4 zvy`ZqRNXVmMP{Mh-P!STEYuwxw|LHqC~FZizoZDQ@)Kh$vF}eS(+mQ;x4XL~K}fHRy`KUX2>01V6_%^VCnFg$52zTXF21Nc@WXzQvJ9>%UUHP;3+6fAb~yMFn|3fqcv^m_Z1*6q zmk|mN?`5r(+@Dy#j=t@G4-wpX%<3(w($>r3@+$v;{@_rU_H%}>_VqMo_?if_chqZ> zhJ-vWv169WXKXqDR}Tp`-W}jE5I)G`!&Lt@zpyTXC7EH*;*Z2`DMB&;D<{^Z)ec!X7s46qgJ< zmsr%H5iornC%67}UD@$=?f#Tv8-=j(X+xbu8YfgR@CmE4tP9+dh3CGiEv+OD;TX9* 
zjwgu$sELip%IYyTknmvZUI@q?rvZ4>VYb4yN|;OOL`1HX54=CU#yo{b{x%O_o6*5w zxTm1`lI#+@k$AzugUgF08jz-4LOXvI-p@oUf`;2&4T$5O`v$l#adLnKg3=Lz`zR=L ziBcH}uwY*Jz%LrKi|bS%H)>h?ccF;MY$PSS`&? zie~t_p`E=8BxN%aTR(-2?Knp0nnTCw5Sl02|}1$EOi1quENOeF_6CN1)_(-F0Q zW`LzuA;l4u03Y!wGI9o<=(yQs&DL^if6sNzB!um@TM+6ENrJsGNU31$ zf^B{$?tEZDmBBxB2y|y}4kaHgHV2$&25};?1b4JWgB8zJV^8dDTD-c;WBR2#T-L_R zY=*g`g?t~y7u7@rd-fEP-*`*(j96FwD)C5Y0-WqB+F17Om|y*o@ZyiDoL*CrVh6v> z>n3y^u-Ttpd+z<&WDS-hYFafW5FpVzL;`60;(&gn=90-!d-bE~!T~t-e%gu+DreX@ zBI{l~lUM%AJ$`dWd-AE@RA}TMzb+-|8G-$=>UTtf%i3;*3LzdBVc0jT6VPmx8iIQs z%M(xZrWalimTkO|G(dU7r>l`8)7bLoyQ5HH)=gg3a_*(oansVKQg_AK^uGs$f1&EI zC+hW7N60;@0e^xae&p4{U)b0^#GXs*uas&p({n4 zah5Jgw|t$>!Va0JZ7guh+1y~jxw;U;en$~CppAlAJflX~7kulzjoku|Cn%2?@ri=24%#P`qF*uU)SN4X$t_OixObi{aT!Us(EUypqB2^%(l&L86&dl8ch8kh_heG;v#9xrbEt zeZ75&)E{FV^GFQ9*JytfpZ|uTztd0(YsZlZLzfOxy&W8ogu9;Va{s32@mHxQCTwqy z7#dlwt7993vN+G#-X}fUP9Cig4G07FeRGK$Xl@1D6foFx-b*lJ=bm`Qx%xpeYqL+C z^yV0Rj*3d%EeJXeyU@5pON{n?z2ICHZ~t4F*^HaV`ynND!H?Tkt63cHZSbyJnv#b5 zVReA2!PTT(R^CV?MPMR_7hk-f7%9?A{8@tAbGhPozo^Z$fw3~a2`oiU!!&{+2HwL1 zD<>rSiN7F?QhgO0-__Se&wjNph_(C7Aj^-I%Z;y=(33)@uN>MBj;bt|N<}PY5~@Z? zT~hK-9sjP}lwWC`~TskcoaGq}Q?s&ZX;lyiwC|E0?C(n^W*zV}l z*Yy-3Qhv!PHQW8Ivfn-bAUOVqhamS$$*}K(t*wt`GCDX6~HT2UKhgLVOOsf+yoO{ zVZa}(Y6pGKn$0THz70L##`hwY< z5Eu6;4sk8niYuo!eqvWc(u`tHlc>8L51&LAoj?fdJ0Nc00>I}AU@YZ7-T?dxFzZDF z^=#Z>RDf^WjJ8C+XT&c_UoaHCO_VnRxJ3iK_`hN|30vN{oil*hT6^D^kmnb`1GpM?wZM7Te${G z=F&HyGXYgVVBkCrZw$=G-4vuPU|Rt+VJDs7W2tTUSwRI;ZqLEnh;t@hE%3yDtj_HI z>@aH>>tJ4EriHdH2hRBR8N8pYAgQcSELYW~;WFOEC$4+cDU`60P`MTBc*L-BymR`e zzNhh>3C7X1!i)K9vArexjbYJa&ApBBWXm&|YpfS{UF*a$54sGvP}?!0MSCqPAnJw) zn%Lx8s|`6hq?++UP z0Ge}t-V2TS;_1uf)sScmFO7k*Bu>H-#a3Tun4P)}b`fzhw<@u#D6xO^e)&1VPh^6q z^%Ize;Iid<>DVtp305^mG;dhZe;=Rb{_LDWB*ggYd$epO{OA!M5$mI*aZJ9=@*67d z=e*^h$sSDL*5HyML`hQKZi0bkr1TnDz79%&?PSM)q@x@=a%_ zX9E-gkwoi}7H=961M;BGHVO#jepwC6_J%O~x}PVrapNQ#Ly7HW&(Gq&L8h&d9#V1YfavghAx}5Gut80(`}SkuN0nrkF27<&Q47yLD~Pz1>-~Mb zY=57gnE`PE7fta6^kai~>^(g3aGT1u<$f(w^dTsO`(|SUJUUKo77Vle;97O8c*%pK z)y<(2j>*CHY$~n&vP=OtU@Ne$(&A?rd-vV=&->tn6lDoG{Xg;NU@?OO<{L`P??24| zWs?^flg%ts-ya9d{eJgByx;?F`w>_%+=bs|aCC*ZOnO5_^S09jtnW7dYB53VksHyd zupV{7B~gF~Sqy=pR-7#&bUnk%7C5vKAe)$2cJx=?EE37C{35ljYZ^9OI=m zDqI&y7*ph&IXqwCw%gtEx60mor4jJ z8MU2$MSpF!PCaU^9vI7EvpQf(r#m|rx^==DsPu$)CFxabI)V>m64;wZWu5Up%)f3yU2xB8W9~!X$@xjhQn$%Zj zXT>hR|BjTbKoTBylm(5qYT;Mn9N`_}H-J)bxOJ&PXTT;sN8*Xl#bqc2u#oMlKKtVH zcMfep1u!4+6q-+zuscJ{$~LC0D}<Hy@>b6-T*a7vJL>PJuMM@TW zXI^8FLK#2fLY4J~;LrHkS$|gMK`zt$XQzFOZ(F%kqfj14w7q9zt=-VNdR6CRDlD!^ zW7Ryh6|D2HITX)TI{tNKLcS~!fuZhb&_h3;IL*Mb4Z->GOl1M0r)6I}V&P`~J4J~k zM<$7^5W{z0afm)FYBTSyUG48Coa&?sk`p%Hf3G#m*~SC#y|RY;ZD=>T{Y;yQg7g&c zp6OgrWDLDp4t0aeJDnW9)SL@=DR~{gWO#d)L^SxNdNYl>6`P5zy<4ii{of|R+k=Ni zT^0WiU+*2w_W!q!n?-FZp|%*cS8LR+)tXhcYQ)ysqgJgLRm9#~QPrYq)ZRqIrnF*f zY>g2!2!7u8{kcD%`+R@j^Zh3$C+BtYNAft2*Y&(!*C2Gtyww4;y0UcntEkb=$PzqX zGDy0gMV=lb+rKYTo5rX!W|tO=9TpGpRYiP$k+`!LXuKfQsQH7Y3uGDCW$da^_ngut z09Fxd!mp+Gypq@-#w=bs^u)(R-O9m7Z9{YJ$xd<>S4_;`^#WUWH&D{oib{dZfFDcd ziz#KT@;ri+!|G`?prwq}K?PV(jG*n?K$_G9tf?NS;yq^GXT-A_)dGoXytMStsclUq zDHG17nDF;)iqd|y)2O`HXpVTIF)+DJUs z0T(!Jy-|1=JboW!$t-DJ0dsY=PhB0{W-0yfx?D+IrYo5`ZomIHXLY!i-Q0M{CQaL{ zUJ>PSp03NEC#Almp?{&OLHA(_oVt6w*_oYRxW+0K9Xm=JCqGV&{BC@`Bc-rTn@#Et z&-s;f{^DD;>(Lmzb5!Z@T}$Pr4Zw^zZh5$C?Wt4-)-v=C{RD4U8i>(w@wH>xGlzDLv9YMP#?ci{^GSgaiB_PxMPNMK!->Sy~jwPo|# ztbJ(^H;-GN(#u`c*#X)0OR`W)U*}eP@}qj=k+k_fs#aX?I2WX%q+fdN>09M6`xnos zy^{AazIWEC(L?2fnRzpa+>BzMQcC6vJ|G_n7~#BG!xqRfzv;j>6^rTS--@SvxsRIJ zsBcl(bIwaClmBHS^~Nw`$rbPLD&nDrh+j%yA3u_AR@+ekZ6m_t=?B#^Cl1s=H@BPk=T$iCP+;CU=IXN&Rs~z`f9qlvr 
zt(y;zI@?j2_h4Sn7T8U*1!eS^v%56>U+P$&--k^rpTi!K=62h7-^b?FCXPyUF|I5?nxH=Duj^6KqPW6LYO`wuRFM zS>QQI1dS=&m`5Q%qI5!K+T^5&*Kp~-Nz)~ciQJ6D+ zA5Vs1nVGlqmg@}^qJ2$iaL5m%hgbx+Z*^D2xB1BQN)P@?*OG9@ z>aG8}EHI|Zuh}Jm@>Zk{6bQs>BP#JLI7u{NmMN;urIh5z?wfiyrQhc#yZWV}88bzPr=1Zw_#3pPu zhSe-XQ2OodE^oDUy*kY@wE9YI54?ertbsm-TlEwX)@f7fFn@p1Tj|<<4ckFZk|vnMWd{>SMb*qhg8fpG>t!mNt&HP zT`y=wCVS(L)4}BwtDp#|pwfd1Fo4TcoyCCbzo2f?jTrr@tQ&q;I0X zmyjQ)^xA}3Muk+5)ZTs_ILBZPEl5xo;T;$h7+@zw9x-r9iI42l#i z-TTM?{xwa|spv%V{6Ut! za_sza-|~&3_myvLi?NKS)a6#UNMV)O{L14M{eRO_|Gci`JgjlP+VMs|`hNdl@)CTI zcnHrS++Qa&e<|+z`;c#_vg?$D@V*A*AZ?$wX;G`tX)6H&%Nc>IoQ!u-ktNZzF zB@RQ)-59lzC%pIv_{)&D-Fh6J#speVC?kk#m`G#35Im??y;nmst{UZU9F|RzeQs4&|Qh}R<-?O>rW@S+4^Y1fEi%)}ms_OFSbeDcF2AI0~$A6J#^WWMf*v)JwDFqM5x9SDMBj~I8UrEM zF2xa)!tjGxHNOZ}DFGCGBk|$}7KBBAl58=N~9oD)wW=@)A`ipx;KknR2z z1ip4=xD)l1+#J+bK3dyK82m(t6z>@S>Y^&DSKMp4gyQ2ha`B0D@L6@B_%cncbD81* z`j(-cAXF|=+Q5{Y2`8Q_dcPL34}jA);G&A^p0No>;9jTjC%qih!h`q1Az?<0$V~aS zD&;50k-Z0OZ6~v%CE1lA)kZ`{4VvxiT|Ohpgh@MH&#*K7!HK=u>`ytNTvHn-oBw`6>q}tUhYicsKPSIJ^*sY}xL0jF(qr#b{LL zYW5AY!+ssoW8};O0QgUxE*J)&;r&aB%Wf}`Fog@?Wqtjq`CDCH*!^1fjSjSXe3B4l z!C=T*96HH9P4?Dnft{t>7OYer{=2PMnRUK+hml+93PDd*FzwbkOCz;ncv~ZpH`EU5 z@=7XPqm>FB{EO5zKOr3-Rk=Ru8r474YN>072he1|j&b!U^1p-wD?o2hOGl>-Kx(lj zZO8ghEi@Jou@TD|toB|YkbsNb`=cUG571I6AJK_?4nE|U6?nqjR;$fss5O54 z1h88JEUIR1zv$5uxb`zz!yb-!o#A_sq3kZD$2SgRB}1sD_a6P-KUX3k#jOvbT75{`*kG&1qs35CN;yW2X1rQQzQ!z&&nA|i5k zGU8X}!lQ$@;^5ZeJ$vr&I7AhG-F>dD&GbW7*>IeYFFgB}WiWm*Pp=3W`@yrTI)1X&JjB0J24L$q#CwSM_nlkT}kKo$8H;~b%rpj)iz3^?x3vQ*GCtoU8yi5#w zd|Jfd9%x(2c(2Sqt?+H!O`Z*bD7B;r&yJzU*~P#kc6C1q%?a9XRRAab zNr=9r(MM337Q$t&n3e&Lr0jckKai2_j_$i{hRM5#lz{_N*z;{&V$!SRMVwbr=vH4g zR>4fzF#z66dQiA@Ue?ci^lUyRu5j}#=G8{)K19Bxk?pEQ9&l%i4@gRb!GhVKL**ln zT7SALZ1`F3k z%nP0^`3M$#@;^_}@fMsSn%ybXF-3J;H8K|YvDu)`A7ps6%Z~JcjNTuxdLx2t^xz;3 zoPfJ(_aJDeULa}pa*bf_-pyb0U#M4RO`xBMatm8I@nX}t%R_we&q0T;yx5swH%pI1 zZp*mV)og8Q%djs;L_{0JpU4XWKLfp*ScqZ=W~wcy@imlMFtAN;0ZbiI+U`}z z@Fq>MtKY}+5ibFJS2fsmgBAv^c$l&9!2;bAicaUQYc+840k2z0Wo%9su#^nRwAbSk zEWr^xYfFdCRo<3K*`j~wy&L5=;QO|&L^4?xkZ-doXtT+|{WajFQ-gG41~4tF>LQ(R z5D5FT^amK6N{RRaRs!8mso&7G>r%&jESj#a{i>*0kZ;M*MbXEn(4G zO`G!oNI(+`@l*yws>Z|5yWKx77m~7;60{T55R!+##j}S5E)hx*oZ+1-%cy~@i^&;H z{LSax+I=9~5SwoHkH4-$*rtna-{>~Oy-?)n^>cm@kYw!O!xkwbhKBW$V+NdVZXce0jR^wRYjCli1 z+G;qSZqmjLA0xWL!N`bB~FTzHgD<$gycc;vFS z8c-$|Sh)6_AvZp~@e&2{AtQyJy1qrr6gvF1g-X0_1 zS==C?%L(D*z-<>*pKH>oEjrO3!ELpz0L56yx$BZl^mcyJjhQTlTJiZQ>%*=pvxW+J zkK0_Zj?=$RS(=$+6#}yR2g`z+N;krCzuC{Fa{mKBmOtKM;XLm#SCabJ?@<)$nW)cubZAY1-fu)s2%K4?Bhu0K5ycN%C#%6HLpKJfXT_(!CPj>etZ zO62$GpgirCKt0$vLKgMZsK%p!S#|*0n*CxS9+{>|occ^&L^N$Yjz7T&CoprE)2`Ys zsMP#-5J3%#x=^!EG5<-D)9yqcng7H>#3Zc}0fxmPxC;52kc&!L%}f~LZQ`#Yv5yXQ zKQ$#-mMvGo(VdL1z;6?YKA773L5rE>vo zb6ApfD#|$DOs3Tix6}>gR#= zwIDc9@^@i@nB)sXwNcAFzqde377nq$X76#1&+M#8?6M}__{Y%uwVM37i7_HGsYin) zE0)5+9l<$hRcSihB%Lmy`>Q(RPZmyhEF+`fJP-72P`ZJaQ~>0EX(p~a6comW}b%6BlRdv`x4HPltEHo2@)L*gh>aAvtG<1x8taX zF@GRnj2%$DTo*oUHj}EY`0q4DYu1O zP-ZDr+Wl3kX(QA3M3bTEmF3WQ;oT5+rG|7bOLgaFSa8Nv>psZsz{n>`xSg6LrL>p4 zuHo@&9EAdsH_4iY+ z8fw5-qMkve|3Vkm=D+@_!C|wyFb!Os8D|oU?vgz(IZWN=bFW-=_Wu8=#&Yjt`%6fM z^SaJ8374)v7>e@q?P4Op_5Dsa4WgkK=(_pxg>jDD{@`JdQ%6}l*4K$aPCwiVq6%R? z7~Qj1cBc!+fs;}m7>qhE4dFF}J1hIj@!fL6Zq}ULjN!vU5Up_Ksuc8Bk{C>2aK}BNOB@#gwu(ToJDw7x9UeB&orH2g+NG^Fsa*p5? 
zK?eRh9AAedn&2gVSxcX(FX&W^YKX*?S#)+sJej$KokNl1aZku97s&yAfR#U2Wv@La zVRkzw>FbLI(BmY(2{z@~5`6RDcKXSPOf`12N3jOa1L$2mQza*LK@OpV3?XGRyCzaw z*RM{iCoIg9%(5r@iqFlINPXZA;sduFMm$M3==2K+!R<08%7}|6~xHiyf>Is&$yzDLj6A-fpuavp-~N&&-cd*2_^|_RFN&1)K>cET)ijN9C$Q~i6hvkCWK-RE{w0H2 zY3a^2+6a^sCi`y9BNJ0LBCgsJ+zXcV!e(UmpzP^UCIwoL zjp%zV1idu9@$Fje52OGjKf?p&kGz5BakU&L9I3G#`q==lIt#-pH(a9lC-(he2G>_5 zcHDxpG0mohu@CGe85CmG&zOMKs2pxGhlTtBg`W*LHIYP$NHkX$?Gy-uc!RJ$N|B=g1o|UljNiX{^;wdTm zSg;kJe(LbA;Aad!bW@rRR9Z8%Ys=bo`kj@A1&={l$JI#7u~Cn;D_7O7>#V+sT6q<# zvXD+bV5;G}!Yd=D@37L8GS}aFCQRCw7b#7j7LhL%)fFFScyOss#kKuAEg7-BxJ( z&S^@syw-(R?lNC0EvzmCO7pTZXlC6$+sSnj4{&yoi>Q*|puwHYSkAh8EQ2zL^zpvD zmn&1BgBJp>A{zn*aN@wL@#|n+#o=WvTSyIU&t_{kmp&d%2l8DI#|{)+lXblXy$+e-NapOv3@>~cV1w^p(s8C*oeSp2f1BNCa4Iwm z^}IYA`qhLh(J_70c1*!!;fQ|JK*8Q<_mX11BCPq<=6~4Y@wz*WF^s1ZsRKjXGt}JC zSG$Ak0_A#2fBs)3WIwYyUv^>=Jkn*4HnJqg-nq=)lsWxgY*g0art0SWNZm}nR%Zu9 z-uVIxQ_03F#9uzje!NyLAFX#Qi(s0#7eoG$D87PV?NF|UgoSO&fO>L92&wgLx;Wph+OthnOI$H)Q%DBSB)lWgoZWjfkoxIi?0}WE* zmuSOxFb@`M10XJbC4!HAEr=!3``Lgc_n3q3F!A}zrIE+pd7kk43^#~j2_h)QpT@H# z#|suv3EJfy9OxxpVMOd?Q?A;0ta1omzD`R-Qjv}T=IZQNp1YzU=8qpbLe2bKf=E8* zx5YJ#4uoHdkQ26C&Ju)I5CbA0?WEcOb;!)zA96G?S`$!tt2<_7H;bo67DVxqKScKw zEtp)5`arI#0M^xJNP-9KQ%3ustW7MrZ5A=`NJYI}YmRwFm={^Az`OBH>JUrkHpTNi zk1{#i_^sMdAL}zoE_i`l9{wR zSNYq@slV809_1Ny^2Vk1)o??E1Y(6K=B2tijhc$E>4q2&A9u1ELcSm^C@d%pnl|-w zK1TToa`|X&Pk;H1d?>0&<1e~^d2h41>}?Q?%bmJ|o4x9J284^uV`ZxN=BQtvJR-VJ zKp7sbtZMB-8^CRYJWUpN)#9U&yBevtThDHJq?)4BomTNQ@&-PzN`BNaWg-|lo4O-S zJO4>vSmPeh{*Fm4flb_IjFju)A644fqZ?AabkS?Bxx-Ew=ywElBfpw_ia0fX+9gf` z|IST%Q7%&^j0!gu`FRH02TvLyaGi>mkq8M>Y;TO)#D1SEwd9dvns$rZiFzrecyNhb z304VhWPEwGJryV3ZxuSDK>!pd5SlJ_KTBUVee9({EAE@e2YWVG6afl&FtvZM89CfI z+{7T=_j1;j28&7SFT#0Z;%;mPzGt4tE)RR0QiRQIha3}%Ty(=7j-PB2MJ!rqXR4nPNV?R}>GNr#O zRSugVazrhm+q}e#(Pk7xhRh|dTrUzwj^?QKeHfcK?qn1cvdZ{Pdk*SQ1G6sd>wmK? 
zb1^maJVfI+iUi4qC^ZDut-(*ne5C7sn|t9=ktD=SUu_xg4{D|juEkgU~{0(rbioT1U9O%_T zvriZ;MRMUv?$3d(c(NSEcLT8PSB+o3@Y$fgVc8_QD1GW*prLQunkIRv+8L6;pqNTWJ_U zh7ZWO=TsIAY!1+XJ4p)D+w!2O-wTZ9tX4qg>Sav81HZFHdXHe~i^1SNBH`<3Rdb9C ze$zt54$ldnGv9j|M0QU>yiV;%81R-)<{0M>R)qLDHW*GM&K7}XJ3uP5NfQCf3?)NF zuUY)AOg^C0O>S<=Z2rs@y=bNQIFch=KKq&=oF%$jwYw34O1=O~%jfyEL~?ye>Nv7iYEuLJ=I4o%HfXz@I0by= ze91TX1ikZ6*8QQZ>|~38MQKRDpDWS88w9kiMvRhPO?7Ss-TA-YM&fSJ3lrCa{1FI5 z^^)Qr>SU`Zee?YXKTnzibqJ1PJ1uHnxB5CWqU$^jB>EiRy=R2QT>EhBX7t5*)_ZP- z39LSna?Fr)h?iXHW)FS?pmvZPWx8hm5kGW(fR(voC0Eg0z@ zD@I})2MJ25^G69@+J|7nwp@1XDVQ1p zOox1|SX_LlY1!!Cx)p(i@*M^74#Fl}OksceI81@*tQ@wdS;)|(F@L-CmopLeqxj7n zB8%g>7nJ$F|96Qv{)h^Y$hnT?wLN>zfQAndOg``DeVDxQn-Vyx%HZ-aSoyI`%Hi(H zni(vm3Sd6|NFbQzFha5EAeoD(+cJ0ta!bH%nZmN!)Z%bx_FNS={(QHesK2bh zFEyo1x4^+!23CQ=r>*z^M<4YkLN2cl#N;}_M|Yq??*>}xsKvc|9|4W)9Z7wZzH-a3 z&SOXln3HD=SI1&!7huLA`>A2)PWuth_YeJ#O@oHQc%N?~KX2r@h3@-$Tto8jC_dmN z(fCKbSa!Rn>y7PrD!Q}~Tq)gFF&Qd0fBY$dB-oA78IpIYb(PI{buA+_o4x>-IH5&w5{ z8J@ozb4tmhA8Vkm5~rGP-gc{RIRFn`>rDCBqd#&8OgK0tSRqxcwIeJ6)yPpouLV{yPFY3rib`HIcB+{Gg zVl_WBzD{qqx`Id?M}s^7F1WE@V{MI6X8W1 zfp@B%tNYtwH4+~7W0sXp`1_XNPLdsn7n}s zU<=^RNAIncz@hlNw*$uP{l@Zm5;zWr@e!bV=7hr9&w}xbdP!L9g;%J6+!-&!F z+w(rZC*Jq$kKKRWJ7?!U-}`f2pDO^jI0e))i%fB#O`cRxz@;AB<8;9+5bwynhkS+e zxi^rZYf`pDCCApee#txTgyS0jhbPMEIs8DxbK1kC^o#3>-k$8SiwO#3O;o6RxeXKEcqe z-EP;NeU0Y;r5j_k){{s*rtyzqW?h9Y#9FUW%HDu)yhksc@Ix%$fuldD>#d#BerL3Q zN=y}_$*`+i+>9cz9gyP@p9EWJI}>oFGrQm5-Dmn^tGiiK0AR*-ne&bXPcbQ$$vrS5 zdMI1Py%$|&L8ILcqK@sI#8SWfuK|&?Y;`6+Jywmy;E<(;p*BJbHaAc+Ni6C4ekRji zw!Do3vv!>j!54%nqeekrPFc6>@YyfORou55J!=En8}K4ECZ+(`(Ga~+6+7M4%)WhZ zP@GMojy9%dAMC&L)a`P-IFRHOJ&9Rbncs~qx-@YaW$WvewK*B^TvIHE6#J;)UU35+A4FG5h`gSoJ+me^U|(w zGB)`1j-U&%w0U+${9|26iqM;Xq!x{EN!fwg5>FcM9QgI(*IVx+}L+ zJ8Mu>vj6ktMU@5f)V!ZHlXwyD%+C;!NMbGqjQHG#%E*8o6E%L@{92ls5CqMxo|F)y zwOf24&y2N;ksk|V;+=7sVl8?o*}p##^|$?Xj&-lPs)nt5>2^84P%!);>`70eWBoS# zo{J#%ET=~GI`w@~wD&=yr;{`+cb_&%qTW>wVkV>WsSKDp`unDof(`T6yYaa=y&s#} zWz_wiDY(W>>o_NShuf2}wDEc>WP0LSXjOi%9e(3HzrF!gig=4em|nA;nUUWTpu&}3 zUtj8Jl8+<@UROLhf0L;9h)0Rax{d-JKN6yS>LewHigfP{efq;zB4dRrVMb1NHZ`C9 zO;q*1#L;ZEhzY`P%~aJ(WfD8v zAq)*-v|M*F@2J$W?U@ENl=HA%8*Z`FfBqPat186{>T^9*qF{~BMZ6<-5}LQyg}9GY zDss@@JP8oDx@#C)5Qk%syZtn%FGgc(a$2#{BR8US;_GKG)d$QFRtOJ-x$toTFT(kn z%~==Or!)1JZQVhH-nx|7)l!c8OTu6q_yw0#vLXA}cgoGqz-jV6cj3$0=8>$#s~igi zG8iWEBsBRqmseV(iX0BBr)FMllSUl!s@V-q4nF*QX)P5uP^?yXISJ9b7VB>&9c5@trarMcF9-q{LR?Ku+Wpe)ZN7x zz#X+$i7mJ@=6?3SK8N49YXZ>AGMfbvc1Isd(f%*)9+K@c=MrjlzXd$BA$bnc$V@ym zO$9yyv$HWH20^HXK)e7=CUuXalZ1-bGdw&Zjv3k7VCjrXh5yIfy zjQ4F~m_QY{vuXD8fc9=1^&9{x@aLAs_e#o+9*HEE`MQpg$7%tkueny5V68DGU2&T5ffNU>4e#m9{v(L)bQDn-tDdHfIBrqZb{wioTp$Csm zEd{6I(xNL5j|yijY!6_Sk0Y)9RA7hHs4=gT`k^?ljo`Bv#v%;EVBb|v*K~UpVD+j2 zKM(*58%H8S4DlKy11nEg%)F#n3gpZ(-3PbbhV1_|`5H*Z=>U2N!Rro|My3*a0J(7s z)GE=JQ#1B)Gl8)-RS25$=gc9Kfj9&fdSfko47}9vtmkqMH zX>9|Vm5`noUm(;DK*1wW=~1!E`Ai%_OqVPtb&5Irr)~9DO)J4hz6Gp4D)Q1;5G>|w zK$g<-je0jUc=WpRMG&i`I!SU^L7eB-Z_{r*h(s2%;GlJhHUSEokX47hUA%n77+8zY za(Kk-N`3x?jztLiCwkh@+ir6_efo@Bhja%eM^R!K^t@uny33M47=o>GUz)g5KDdK# zC;a!=5NrjihMtzMWX+8Az32*da$#Kj=prCRpOekpJ7%*{mpl_H+{7kt`~y#(MCHd% zj9QhA^;PajpT125NATg}hLPJ9`I?LA%%Yd&x97MnI%yHpVY4ach5CP9&BTYuD-q=^ zD)L9wk_=4*x@@TG@TZlYd`GU))od9*v)jqaXvKPzYtOB&Z83RH(<#apA0QxaFJ%<| zb3K%c!PuL)n;{HRzwG54mn55LE!~CN`VjJ!^l`~~0F~oO;boc>0)d*vRP_nxS#nC3A&NoN*7(wxGU*uquM~${QoOk-t{gWfmsn6x~ARd?q zMCRBazRAsd_}5uuEx?1U8+>tY)}3XB+jyvjhW)IlmC^?&qTa~wGs)W50acsN%In;e zYHJ=%&1enr3-t1C_3=|ntzVStwSpD71RS4ZN zn>~?f-*@Ze-XBxK>SPbvFJzmZwTG}l7*aPVt_E{=DO+&5D9vKYLDmny@Exs>ozHC` zT&u)xE!^N;JHJK+jQGwNdn3#1uUqNKRK 
zQss?WwARzN1FAkW!gg+#!ZaBpKcPNdzKyp^sYdA<5xQ&jQUe~4*1>O{$JQP0oB5Jc z)%lAw7?ZMR*xX_V<81SKy&t@Yh9!AqVr~U9bHhGgU@7-i-m0B&h)R$pp$+sGO+b=l zGtxnqw*zbU&8v0e-bxT|)eleq^2rN*^Oqipk{@+s`N$d7->!sNJ719QHnNZa@Vd!;1{@F&;e;?0@ya{u!(Y z?430Pwg)$&fN|^iyC=Ux!s_#ISO(?reX#Ubs;MOtY^^Lvyp1DBLuZ}tkW!w)UF zzVFE?kHuqzrn`jwT&sF z_7MTix8V}2@n8ZOUVU##leCdd;tLWz`fBtv=e{&S=x@HO8VCE-u(9{T)A|5sv*A^f z#OnJ@lrn{M;E<`&U|gWS^$B$5@*C<`o->GK%{1AU5FR1kBD{p>BQ4$AZ_qKuQtZ6b zysBggqC82lfUP->9ei^dcv3S2ecnn29D&rej4Ytm2$_O5<@a!I!uCtc!ruFoJ^qR- z_F6qn>X@#(W(@Znmu^_sCQ^?` zIooaydPna!wCCRkNBZ$z5l|K;shxYdC`NK(=nj%m-FLP_SQUm38*%N$w29INTmy6@ zKbL)z!~d+jyPf;R`d{rjIz#TV$_rLj18XHGd+qDwr5{r^bal^Xrd@0>5?F#Er!38N z5K|U>9=S&N(SQVpgi1W+F8-yP;!4M71b{?<)L@u=~TKe>kjN) zjbW+hwxgF4`>Mke{Y&2ytjCSs|01_M5)5P`3tDf`oVoMPik4iH&%k{Unwko&Y~Q%> zQ_2ARaR1<8P~{b$-)PrWGU&!wx6i#il`UxQ+`FN2avV==>C3lEo4Bga zTPe@fJKIfrlB{H&29ex}Rxk?@fu^shj~uV1)>$1AkN^{uT}BZG{F4*hPXwL!>X6s+ zSKZSH#lM2gX2RDdSIG)w;2@VF=TDZ;t%u3EuZmq;_EiuvZqb07Bt z5J6U5!fdk7$OA|kB&5Cw_4lL+`WS~Nbppx}?9lW=Rlrx6q0PeiUxsPnv`(feV1*>v zQn}|FYJf;hL05Zr#h(;K;&E@03&28h93ZzF!74n^%Lu4MgGL9;kb-Pf-F%!Z^zVIe zNNElqay=Bs9e1YQbg_!@gdtQhplzH6Y^l&^3xI9^i?=VVxrhGHGs|v)yg<+)F(!b) z&R17w=Z~j&)52Lz08iJP)Ril(HOb4mV{{HeI$UejWlxJoWCoPLY#jIuXZ5#)SK}*< zSg=JwqbEbxQWVBg$vct@nF|Bi8OR!hhn$)v`iZ&ixDVr)jy<$<30dPY{`tUw=#R_sM`**xes{z-7i5=&NJQFx)eQN5uLYy-JHPY z@uW7XTg#7lUUQ|+IWD+oHLsjMzGN%#JNuzPhASJ)%UnNg#5kTINm)^I+Z_nx1Xh(G zR~6Yco|Qc!e30}J@ZOi23-E<5kT5ejqP19YibCa7lVw%_NbihY4zaCj=r0tC^b>DQ zJw2<;FAK)xbr28J$>_|W%cwxd(GDG&44Xn1mR~|Mz%;+?WR>p6RI|#xEMp-4{IytZ z{X}#8eXc_z=h95-svWdQ>$7i7##Wr-`oYmH#p-0DO-_gFLBjTajPmUcy#J?rptGR0 zD6d>Jc|z8e&NSm4<8-CHR|q0|1S00Rs(m&+yue>GYx*BdX2la$KPJnY2i@LkIcaOR z3vlx1(64Eq?jt|7ncZj~C&uU0|2_B^H#Q`_Te z#Hp?H&R~ngR?k>s?5K+?KPgxUh)`no>9%Q8z;hS+S!Q+JoteDD#*CX6hZ6TT z7N6eHc6si#`vn&Nc<-jnamg_Vh6PSva8S%t4PcTw`3 z!g5Qw^orlkp60#W`@hn$0#QqM4>^ZD%>SKemCURf(4*P{ zJQ)bV^TByKG!YMGwd#gm^`IIRVHX;}vIF)23L-6HTDgoGH z36MlKEXi_Q+v#AM_!kghZlBR=uHObY#9h70t6=*03=5~oM0NDmTAn^(l5^|?n=J8J zvWd8wF|PP^i2>hfH%d5*w97FUP9$B%GqmtuwK)9X>{RUFk2vm!k9px2kUaeG_67L0 zeL;JlRSet#ct3qcwMe+hjV19BDIl{Rw@xn$u$)vtTaq z?mA-UJj(lGysl-LuuO-=*bmc2_YSiHGb6;z3>~;tWPw!QslqnG2FShG-1l?5|pW-KKhCTaX7hG~EPDk>CeNO5&VZ4vr|u z1lb)#$iNP)?VOXibpOG7o8%P@SDT&fgJIbjDAU z6bry+Q26@z^vTg{a=vEraHY_b|aTsU+L7x!tch? 
zb9FK|p+|qIq=lkDtI^VrR-S4gR(I3x@uGrqDvw@nCOq0+q&DGZwW}s}CMSg3^(yxO zJKGxW#JOU{vpaQXT5E4Qxy+N85qHb*)(u2$YdDp8V#i=~%9qIDJ}Ikj!dJN=y$0y$ zy^TQ3*$2v*>Lcl2MCQ371L}X_heXqB>uw6SXuj!I7YEFzvFW{i-GOHxDdCj}2RbRS z94fIWpGkuv6>Dyk<&kMHQvQn2ucui` z`U%%jJ!`|gjHP^f>D}%i(;got7sHz}u6Q%@qhscUzxw9n_R&R$-#6=XWc;8u+qOV*P=r7Owo{cmB9D z<)#xG9|;FHok#FpxcOq%_V_OU24C?cA7{UZM03=-BEF~vPgSu_JRZ0x4n*oN(y0V6mVP_5mqVy+>G_v8<6w^5Y*QD$&hi+EuI1DN;` zZXeEbVBtP6Gh6u?V@xJ&8Oh|~0lWaV#Ml;b(2XZ4G;jjKZ9qoc^5PSDVw^d$!oP0B zje)my4!|}-@qHYuq1CcmyZdXQ`NGpH7Bkw!KB2gGXex1sHa%Zqw4ZBo6N(mQP@!_l&9ndRv~ z>c;Rus7LJysY|@>F-weVPq6)JFGFqM-@@8f$0t5I^U6{}A&*|R@$@3YbkUm&(m!we z^JvYRTw+V|vzUiyvA?UYOnU}BIFi{s|GhFf)g{DpAfd?9P!+fD3YnM@<7o2Hjiq_4 zrtUPiL@#Zp@yi`i3t=NcIkJuqsTMSh;c_EbYg`j3=Qd5*c|ge!vR$=nfj2tl)=RG2ZWm6@oZW=-wqvbz0Q552RQP2 zMC@S!BWT%|uu+07EV{6!L=ePSLoaXF1I?Q8ss@vjU_oD07&b2{T?%#xv->y+Ka^4U zI4U;LZ3zeFi9-85t1zs;O4t%^1Sy_K^|F(EBx8(Lj6gWLEt+4lhX(2NVLoqcDRx^M z6RoN`tJ4BPy|aog%?!g-f)`VfBS9&S{fK0A(^&`?^g^hM=A(0d0;b7U%=PGeCk*w9M#dTKY2Z~A*HO0*I1!3y@FCD4Bucc( zv0Xpa9NlY!lkZ6Q5Q@T5hNXQ&eUI;}5ZZT@B7Y+7NRDkOw32>k;fchGTz~Ou;E+!c z`Qmy@ZjijPKI5SzVRcIpoaq{~*?Wg(u%l2;x@TGjuDy3r;1XodfwM}6EuP}^E7d*% zevb3g=621mOr*rr-1 zogGycZSX?C@H@ppE4B2$-s!z`P&fO|;+3(Py}lkbj4Xz#;U$|nVVz15#i>J{R7 zSLtde*mQ^0vHDL~`rBcYFRTo0JUxlsI$h&^QE^R%lqTx#mmM4#a#^z#GpNg@*C|)y zv4$z34H1)V-|Pkx?aw!fKRToyOK*m4Y*D)W$Ub~ZOc_@_7X45LnBOh4DSj6Ty~sHj zR$oqKyB(*Y2X?))8n@l~-c_Tt|CnCj35OC9v!8;4W^TBdsr>PB{{$-(-uO5woOPP@ zBoaFP_{7`K+R8on(`HJeTHXpPJI;eb$UrrA4axI3=nZR|zqZ5tkJ-S)Dh@FYlL=QP zio2nZq-Gp%82UT&RH8EmT9_6z`xE?+ z5HYUi-fNOx4+C^%2UGLfMFuM0pdd(vJ#?|EJW7;X$anpTpjS=aIw=3|%53wh4*s?F zS+LWzGV8@HEC zE9z&x)6aGJt`(2lP_7;P@kco*$eHloO+%SLd*#aPv;TlQth_9k_LgfAb8vfb`7YL$ zwdI)FbEfdF4Wc2@R`PR=F64HD$qrKS?{~t#Y{WKaS~hEgX=09plQP#aoGWFF?%Fh& z4hEn|GILN9`5WHYI+<%@l1Ly&f z#aMQac&%#67XPv^e(0@SK)(ce?VcVN+?dR7?CY4-qQBJhjrmL^s*mU)J_y9RKmbk? 
zB7xz{w23$($OhTCJz9(mv8s!a5ySRNPIE|>K=)KGlUTsJSfU9)6;f0HP~&T{p4 z#B+g8ndO{V_G1O4OmRG(VPRpJK}*T#vgT`*hK8n}%TBCt0p*KD zdiqVcp>TjU6XwXPg*%_aRnp>+#j)Z(GJ4jqoT0|JuJy8?iou__DsN~M34JRpY%)XP)!!D zJ6WAfEn5A4qyr3d;SXT0!9VL4?mZuZdWb-ZOE7u7GSk1G9etPY`%h4}- z!CgjNkE{VoVC%z#eUf5?44vRaSBGjSo_t%B8df+7|JGO%&&e#`&fX=0nAAVOJs=1Z z^WrQqxEV@jN4RL%g9P_ag^zx((X;kfvIVB3DXw?0Wpc|r7IwIPZRk0imn~Z*v3Pt- z-j{z}-Q3Tq|H^J!DUH4<_ztJM^1ojqyM9F2@Z;*(ewI*<&VW5dF;|Nh9GvVVX!|6g z_loaY!k>lLd?-_|9D@=u>VAHIa1Noeqj)x*RTLzks(3Y~$eTDeKvndM9u&XbT6ENa z**E<_B;41Hnh=&xfM?F`5BgP>8EU*uVWJQ}FL8OcN(N2cnEo6ijF6OPHQa(~io57O zNlHO*iS_tj3k=W-bET~MH#vR2pObl09(v1sm1_AmuPn2cF}a^2RqH8VK058RP``7Q zJi{S4``TjH{^r zr#nLoG40TcN`5Z(rmt#ErJ6D-#Hv3$EwH&`nKQo;{s^g^oI$7c6^&=kOHhZd&(tw) z9=JyZ`XMhM$pVTt);V-$^;a|74rRJ+YRCbUTbWF1rLUx0(D(kd!|(93c&T&1$$My4 z`B2*%g2b9n$}4!+EPqC3PN@DG&sNxRLOK9NIV7R&Gf`q^9~gsW<-hHk-?%;oJ>8u*kW2MpV?z*vlVozO6-!kDV%g`L5-jx zI(~!2d169I@b>(7Mt_dg)%)phZ$0A~7pU!ntOt8*;6KGy^4qru-R|J!Ql|gKXJc8E z7+Y(F4fo}*p3zE9+GFK*)A@^+pIRjauU|o3^4ACa;)&FHW`KLKLz?Q2f z=yW?%#?y|;7n>Jd-Q}k}NGjBC2e6||hb6+yz}^F2aFc*%#GCOJ%P~NkHBD;oV$pH> zX9Nx2f{vvWxHV5_0aU!=T2t8h05BZGhG6cNgNZCrzb+91^Ky`S+$-3UU}N*301(`# z^8tWB-+^A>4Nf)3q{VlSK$A}wMMZ8F8bw)^%@F3ZXRVwpunL42Oew25oKaES8tbxT z1?V_Z4z;<~jZyQt*{C;_x0WyA$@rGOfmD$dgQ^x$ni~9VS9&69% zyWZz$G-5&esnfyqrg@{_&F&@(jrHviD#6pxsdopuqi^?mr)Ijv&$z;@Q)a&%U6v^t zs5RWSS&40V{xUt=#r_I)@o~Dr*MmbPA+vV+91@0DW3 z5D$@S6ie z3||%y_}tID7b*Nit=o;*?kRF+i`m?6wIBh#`#!<*?)~=(xdzFor=ble)d@|;`q=Vi zj48{YB4mEwkX%_-s>+i2?)tBC zyqF;-4%?r|h+a7el_yieOq$ld)|)>+9qUVCf4scveW%Xkx9x0y+~@0g?3kaBKYzJk zTK`J-8`_dJ-f;A;G2pz|{bw$#6`iZAtLHu=iq@Q$m0HML`sbu8iG7HFaKmXl{HM7Z zkIhGy;Ue5Oh;-qkN?2Sskz$cQ24D$1;xr?!16D8SyYCqX>i~$c)_M{3a?_mqj~EcR z+GoVwX%LQwxT18V6U~Z3n~(4COg0<4feTG)Cpj!T=75PSwIWd<9NfS9SdGj8ZL&RfzD?(K=`Q@?+L(@V6D_gP7?25GgfM_c#cTBxwiT zy04Y_|A`!U$9O)zMNQ3LowNpMY4(;qlnJfrIexp3!I`?BTUDK>&8uAJ!&XkBB%yIh4H_rL8})!zpf*M4zY<;nAb(dmT8 z%ec#Ng|Z7nYLYxk$*7ADAMag|Cik1p6z!gUk2t1}&$8~Vw#(o|)f6QMBzg4R8zXud zTTm~>>y#O+$?vJ0Gw**bqwbqth<~w0r9ccMQR?n|<#c@Zml&yJEbKPcD{fz#zM91E zuW9i%5za-8D+Fb3efi?ZurYfK>~zkI(CX7xb6mI)&X@jQ|=o`h1v z+JivOBvDP52Gz87;f#&VHKkg%AzCMFe#oWj&5z3wl&-%IBi9>)M<{Gx;(8J}`K(D&4NFs(Mw-y10n5 zyW$qtM-g@Hl!!8~S#Wxx^=fbR>^GqyLpM#kV z5&ol1@8Ig120bXD7uSTf8eUH?(l7k}CK&ox`Yg|NsWp7ZTBaQvS+rX=x%jWGCst~` zZ*^FBUb%DYkzszjHQ$xM+0Sy63s)4en+|rnv9UOwOqgTL{3R<^)wZea%l$8n_P@TZ zK2x>WVqDQ3GtIrzm0`9|O9u&b%iuRUlM}9H_W>?YNCRfN^#RRJL&?nGPM=3sIK%0MWLV4~sICNvc8n_D zF4ARN`iJ?2C=+jy=mCAIpngF$K(!T@^5k6%=v>OAo8N}q=AP-3uYCQ^Y@ZP+_!^u& zUvy4%87KtR?-Qy4z>c4;bWn+sd^TN5^T#@=lJF1s4Y;(7+Oh_}18{-EcxHomY|>-t zjd9q#^SqcDXLGx~q*2rBbBLJ3YNly>kg*H%BvnLoV}Ik_`QL!`Pgl#|n!BWnS-o`T zuTS-VGEZ5t5HQONykde{`1qXmPGD;Us_u@|S7y`?;1Z#0()QcKkL}ie;fdMyeX4aF zh1+Rbhko1rVrZvv*1O62E3a+{D-Ms@1`UInjTt4ND~|}4q(zO#0byz$fbq&p#=ro4 zQ|CR$oN}FaOU58sLi28u<5Pw4g$IN|Y>U7zja$$HNiTAWxoqHD%M$%juKbjziC=ho zvaj(-)(3dtq(5FNUzY1S=usRO-QRoRKE3xP+d$`AMZfiz%T)B5hC1(2SRal5CS6v;U zo;tnow>Sw7#r|=jYQvp7+eVc4C=^$^AKTdsf3Nl2DX_RH<0Tlv)xmYdW!J(30R^~3 zot+Mdh6$s_2Ylkk3YbC;vq;8;&yB-5`RK=!BLcvl&MhH^WS1W}A%N}0d;7rc41UVl z`h5pD#lwf3HilrIt6wa94gx#wA&Y!Ug>*;_(eI9e;2;(1Ki8Wy%rqy|Y-p>3W)SOD zDbut=+1yZ^)T$VXU&?d!BBY36&;z9))m*s>`TUh~wW%2Fn>MZX=-d?V049vIFlXe@rED^}-#l zctG&@2hFl!I55yfgwdGVz_%zK|f4INeNHs3gP<%w4I=vNwzrrEn zC5}Ne&!D`N+dt~M2_bkd8iZOEZ`;Q%jEl5B#ea-pzL+rX zI>tl%xk1Vrzup;zhObJug&P&!;}TYaL8&HOT`?r*g=+}a+lQ<)t#D->C5P^6OfJ8r zMo7p@uSO~4@HryS+DYe{maYrWcHC*rR4d@Uzytu z2E)Wmnj90MMB9&rc8|3;ga^Wcua#l>-heUR53Bm+ZAadIhOFiPh6~fKQ~_0Y?VU&2 z`n|UHWFezUd30jCaQ6$a1=fCpeM8L3^4*?(4xM$4sJi#xX!(CgS4FB!RP)2poAsP# 
z^38wQ&=w`SH|RC3t*zsk<)!pwwpP!}LpyD+YFWYJHatnB=l4SJNkOeJMB)ix6o-MC z6}W`g0*Y|#2fYHm?%r?2kp%;uosN03!xZR5DGrJ0=ewj+t1K=FUgJ#10}J@d>A;CF z9FO0fW0YCf_ZkSsgZqxVm25ue1DuwaYMyliRPap$j=zygVKavC@_a^V)G-Mux{gm-6Y-emD;tNTCe?;TAP9Q^z)0% z(8LdeM}*$I=Q0ka=}6N5Y+axfBwM4!UW5Zbn}-}U?(vwDK>vq4TA(ukZ-zGx)~5IaQy zIj&;e(M3ZQDGyFObLi%=*dFCv)2ozEk|~u3;ygRJ4YasPjuzq}TU;q{$&+^b`&;>^ z(SG_aW0!ys;Xk$Usd>M8R1(9?@*TZbH+48bIyljd8=244KL;T_g&e?i#1rHCb+oy) zn6+t@Fv|R*6~7>f05`ppNhc8gzND0m-<*y80lb8^G8U;~Zok$&ZrbJb=6ddrbqu3> zi*OU^8#t%aM`mV4d%HeL-d>t~Xy|-!4~^LC!=;w<6#c&89(WvqTbJLx!HeZ|bO60FwysgQts0e4n@`GcT3*-)F%@9a@}l8+QH)mL8_L1q|U2$hRjJs&{` z;Gpx^TTB!J!M_Px_jz0Z45(%edctdg`2dbmLi}le@=Wtkhb`J|rW~uTM4~{7v(|ND z1SIkOc_jyLgre3VFA^R8z)x!qh9=4vS%r6q&!xw_o(vc|dr|bXlgqlidNIm>233iY zG~tq}GvHGN3}ESSuaB~3+Q+-w+lg_giw*RS`(3F0PTh=HS-oPh?prM!tXWfF^1sZQ zMnvD}sZ8sjX8$y7ZlA}h_d8jiexfVf$)D}tu{vuD*i2Smv36_WeL34s1>Cxgy5F<^ zRF02Gbt_XroycwU%fn8Ft0WXm^s^jg@OxK8pO;0xwkiSR^v_$z==#!W=x&`|IeLIe zdsJ^Vxb$+jBzUF-!LsXzVY!0C`DY?S**<-tm_4Vb0?+eO;!g~>kc7p496hjiPTwgG zY!!iqkIiI;zzz1C^AmMo6Y$=Q6z~rd;YhJgxENRXV4KShEo;KwppvjU!O_Gp_lT!< z6W>5g=$ooi;*>!hXNkb5O4m-RlJm-Ib60fQ`PXZ@PSHBkl;b`fI~79ye&S0eqQtwf z-QsO(<#-FK0e7}5xez|7R>_7@2i-irEGK)V;uI9tg+#8M_Gwle3LC}|V3U_>+?bhk zv|8`QL`feWEP>V$XLjCKdTD<(_b$GU7_c)*fuj(bmz<$W5se%IP0AfAE}X$$XCGzR zJd-mHsDN(+^wd`Bi?;{vL9zQ`{3F^QnWJ3PSzT9~Gz^@X><`VHZgnS|?f7iRkbK01ZC`G;BL5@9`(e9MS@8qgbHORjBW&A}p7=ByEQ$lP} z7C&#qb!>UpBuH}n^yE=!q$D)muM+MX%|OpN3_B?EGDWY2xa|#pj6dI9br4BTMUPuw z>ff${tMW<;kp&1i`L?1Bc{ig)$eXp0q2MeN?(gBR8i-ye&4+3j2=4e{>_OZ?n2wpcHP7gQoTaPd%rdO#&GA@4eu{!_cYoab$G<0}9v!qnt$J&e+b(d>3r>C% zJ;naIu>!)^{d8`$jcfn8#XqkUdBXmY=eKwl(X~)?Cyxp-CUt)^`gqQs^J+}qs@csl z{(&UF9`vFVofXxP??0+X=0@3KJJ6-mZ|U*92p#fQ{ecd^f=8Xslo}hO?=E>hCR+C z00r|FaLF5JnEy=ZYp}Yp7(-9D{7lW`b+6Z`_0>vnMyWgqVb`0jhk--(GhWD0C*{s|i5xT31GtX`4*OFC-e%^!g6+!3Rz zej55b4nUIuTupq9_uZgzf@RKkW`?e&h7R=L8eb4gd3=!&KsXDOOF4`yS+TPF=OTdC zZ2CmS7nahoE7Q-}|0=6!9s)iRZexc%?K&lL;3;?9@h7IO7`)P;=<_aPrdMaz zOqOk(P=_S3&LlC~EYz3b0m?@-`!1IMFxri8W0$p`|4d9Nb78?$zuoNflD>4;IlJ!? z`n?~rqI>C3XNfYWKdj*^qjJjl$jU@Yp*g$D%tV>nafS&gXcyN86@Z`Nn9pVccTTHF zIPJ~?BW#xjfxB>~ALlb5?scYC6yE~w`?3)Gr_~fN(&Fnb4APOX1OJ3P! z94hpRtQ->%Xnb`ZY7t=_y1_>^omUlUaMu&W@d?(eEC8fnbz> z;hTS8ee3d5UlX@}WM*{Txz2lL^_sL3c5xKORZv4@7>*a7VE-V;fde-Q4?<}uSD)V- zrO+UKRdXxl=Vg;~^t+*uzw&^>?OgNn!-sAb-F_9haEL35z#NJ`fzKGp@&TfH8-+$! zp6iA1ppqH#Q^>zJw>Ww7Y-dFTPp=?HEU<|$9((NHW!#h~1LTfmE&>ox7YI~OlLbqG z!9s)1Gh=`>lP0Ugx*<9NpwM@gB4IyfvpGX2AD~5stx>D0#al7Mwt>tB@3ux--IX5P zEw-h{!Sks4)f^0-mok+oDw&kN=YEc7_BywaJcroLK?ivl{sHkNGLw(i;qev_sqI!&%Yz%;5{gCi@}@W zj@hCdJh*6R@1w|yI(H))32XZIUuaf%l$CfCx;svoKItugfNnNO@`;~HcM%D9mwaNy zgqO#|4=ngY%Sm0Yi|k|q4`~uc<`KhCu44nr>Rsj==b=2oC*aJ8jii%n9pj_)jg0|Z zKKPxZe$d%{LG@)C#f>gArvu`vM60cU*HT=s zc+!dbxRB?!^|bcw@9aqjfAbgf*HP~K*hj+rKLVt7OVC}w-PB5*dbQT=lnASj)wiuB zlH1?^gBup4JCxHRgfqOxUtm){7L*QWZT`Iv|7YOk|B1U4a=$k5xi=U5J@0g9iWk&V zT9QwRy4v@r0utNeB)zZs3qs18P#uy9;`ITwD9!V>hSa|T3Buk3OlYEtIiaotHY^m> z0U6@Yn;ikBIWSk{u*Deqy29kwfIr=ufKrl~o@5|w*NZfAEc6=}wR(fQ1;A#7V0(Zc zpQ6nm_U`9}e|MwbkSA~V+c98y*dFKs!gF8c5-7|uR=Kwm_vb8BWL?mLicw83VG%-> zC<|AnkPh9^MxS>y_r_E`_nh3SPO!JSXxn6#V27UUjr~l*4#GdeMB-LP+eD3zvnLg? 
zaw}Ypc*s}Msdakl@4(fR;Ph1;;}t+BM=RS1mreaZ0o`k}38OL&cW3rY0HVuQ51+4n zR{Ua60nZW84b!u!^u;v=W#Uol(PzY}m0xk13|hF?odO#Xst8fW=2rzXFSqoIBnw^> z6PM0UCa>l0FQlk=*vQ(LJcBU-i`So(^|RBMIju;&vZ=gkgdavs;(dFO^`7i2|0~_C zk4~o%F@?HgLa!73?-QaG6!B&`^4V!0a0apy$W}dV@584zk<~sdX38hB5s?!<{2+cc zSFeC;p`h(#Ez7+OT=D!e7*ny~^ZZ%7o4GuYe@RZ%immSeY`=nQ()0C{z1+G+x4Xv) z6MG6W!kKF%niNra2$X1Z$OZd%OH#~xMgv}7!Q)L_=V=X_hT-N_or1U9oS}?ecN5iW zPGgzp=#dxwFO-ke#r)=5WX{*B6G|n>-@hSXp)66cF+~5JDQ_-0P z9}7%}_FXvlC56#_4T{N{(9K$3YI;MWPbJOL!nX^`*f{wc|DWBzoIaP7bMnCZF(F%4 z`M2ndWpCw+jYr=h?Psy>mWSjb3j@fk|3K>Na4Kxk9V(H0;k4B9W$T{qi(Uk>*a0ar zeLHq_ezB3yO0IC?VR6|Jyi&vWaiDYMcWogByAk6uZ%vlbklhuV^#=!hDN)9F<)419 zu7VO>`@5c8{y(bDGA!zUTl)-+pp5fA%@=c=5ZKSKn)`d#(HatWStjtHmg3ty_%-o=$bM->;HR+wJPn zENJl$3VP;jDGk?7b}j>Q`y=vp9}@kU?`qHC(A$O85>|29(TVL6Y{y{_u2x_|aHtg| z-oKndh>LE?pMy&L`w8;ZT;XaInvfXU5BiGzdrMZstWZ_Q)bP|59_fb(s(p@Q{!FN% znPoB_$MECFeLD~tBEC{?GRbK1N>hzgTYtU*V`pV*?$OS1(#$@vc!}Hho5!Jti z{C*lESbZrZ^9ap30Rx%3rVWn$X-8@^eyRblq-wUQmt-_l<$wR=Pp>a14NrDULOfxL zr-t#?Y0LyIXFw%C5&MKjtJDzQ{fH`8SFqhvR@x@ZTH~~h`>0;HB8^PrwNNk5u+kym z3ZN5Zx!mb)x5y%V6)T~tAZh;gjs9Ul64^uI#|ED;CUy4VIHQA)H-{Vq1_8T$S64Bc z)=bJywHk4PAn-HyUtBk?QaeSX_nA)m9C$v33H6@T4BXR`i^R}1DVvb&DdU?@cM?`3 z+pJl6Lo+tN`)97g98Ku(7~Ey+Dn61;y@-uC9F;_MJa|7qy(+d24c#C{*BkB={6%ag zwFu7t&vyC0Rl!&1U(oK0ii~9{Gy%gNC2cWc67Xy5a=Yt)Q@Q?KMR=pJdHk;;ucSO7 zJ}IzE4>Ne^W4heZQcjbT!P>P`&Pt7ZoXubFJEivb=*;JyME=mOu{R5^cd)3u4iY6R94$#pgbVIjJpTo zDX6Nd?k4$Ra|dp~B8z@D4>CR!Q_F4*sK_W#~&;(WiWc0 z6G=54ve^kSM4x>U6Ek;xVjf=)x_tr{YOKchzN0C^-FWmOQ-Y;3WaW!*zwUW?iss-3 zThNP3mT1vOvKRG0u@~RENC(S`rGvH-*a0^)Ox|jc5>o@=mKDapmIlCIj+g`%Q#voA zKkwOyK9TjK$r?fy^0){9YDP^uh_N90>>`|r^^uSgrx>kO-O2~o{K6brF-oi;+h1}( z$Xt4=$5liz#3%Ia45IOw<(MG}G3eTE(e=z7X!HA0yBvN6*1R8m^>9KT^=^0<*u$Or2A!WUo&%kK#e)V>Ectk&U;953zBbN5t#D z`k)cnqp_(-vKOKDQT8mVe&;W?_BZwfDsN0P`A$-)Hl-HlZT_b>>Vh~E_?`c7>XnEL7+AA{YxHL&4C)anJmKDHF}mTATG%y4Ok$$xGoz z+75Rob5SYK^SCP?nFvm#h3E4KasPgjky2jB@SIWzXvjbibJxEtH*CgPGxc1)YVS$| z1l~HKm^bKJ=wv6`larX=162tBbe6`;l-6ECMh*>G z8VlXfdv>}_rS;|L%P|bX%<1YON=tGb#T}93=A?P?=4!FusxNxz;M7r-wbh zI@OP8gAzn6S{=q_-+cAb?}bp)y=sAZhqx56jN*gBC>i6s0*?F~mTOn4chO&{b;sdz ze{0_*49-_Wu5~sg)Bt@8plJP0gX3i|Ku(V#%UsS;IrMGvOxbQJid%f)rzZRwFxvnSq%D z(t_q-EtdGQLAHbe(&(y4gQS|V-7HfHb2HurDRZ@>etFrrdSJ!7tU36Az^5NS{_z6z zrNj3Ewz^xkQ-)gmL=Gg++=#g%pZJCbSZm7Ch}uMHzK^vkAjc6{8){xuT}FnE4Lp4D zwGLZeaGcSQa{E1>kSCg0+o0PkcX297^gE#jnYf%(U42Y&oeKjW7|Vq{iRrwWyfmAM z=u^1d>@7}5szy*Z$f@o+{|?o2m8Rjh*afd*8Qd#a|tET-4|v?PoN&53?tzxh! 
ze`P%$eFG@a8cd-tlP>dH)c##IG)JwNmhGj>;uRNR567NuMVFy8_c%>_nWL@E@}?#P zx#wz5clvaZlo<$G;S&8IE<+Dt%>blZAihVt^{K|UV#E;H`v}{bIgr0ptB|(7v(_Zp zz$4@@A{@_}NSoDc<%j{~UhWNaHCQ3Bwkfcc0ZI1FkAFS(?bgR}!7g0Xi+zQZgwQQ;}c+$kzo>$^mkbC&s`4iKb(1IBZelU9*Qrl zgOL7}oO+f4otOUKQO!`#LzFM%2=zwE6*C95;Via{>HWA$!vtBzEcij&pHe9pv+x`Y zO{hMn%IA0N^43Drc_+m?7aEZ&DPf8P zMT|dQPcr0KD(@jDw&~LMV|eT;EBH9@JGHQdA`v|wGcGYLwUR9=eHz!N~Ns1YRYfzsz|nNmlgNZxmaJx4H^U0rAj7!q=0cN3y64qJ-hH!bp;3t6ukq z(C{HmhnP#qaojp2U_-9je6ECgyi#-?PpT66%lSfhiZpR60Hw~GI?V(<@pWFxZ=|;p z;4aiw$@wPzD(D?%0d{}@nHSIup1J1`^|NKGnlC`W`0hh_+u`ZZ`T#pLQi+l$@?auy zpFe_Xe|XJD8HZgyHd1nAS@TQv-Q0zh>M09+!w*yM*(Py+!+RA8vG4Y;pK~5C`@cy% zp`Q#5m>X3F?W5WJ0WayIjSmn5Sgx)8!gi+6;@tvB$_BaocL5zp`QR5m(9?|Z3tN1e9<5Z6uecq!j+P*o3naXbA0r)PIMWOnhkS3-8nxwNOv2ckPN zvN58ni>sO^Oz`D-0mXaTxf3c9aQw}c6oa}_UY?I5J$i-seZMS9PS<^6Z#-MXrAgeU z zQoNz}-q^U>V!DRfd3SDNvQqH9#%*P1J%3b#h0!Ait^7w4Rl0wH~Bk&jgwqshm7 z{fQ($xs)0533Qy!b;guR9qR$@76+WS${$EVlwzW;J#W#!bD#?}?;)62oD{Z+V+Z9fG?zZ(KqICq2--J`hUb4OY_XB+BA-hTr|HPDicXD+d*$SnYdKK~lCd~UV3^A)t*5>`^ zyMH}Y6$1KBva6X%CKx-@{cpk-S1uTr51M30hlgm13v1dx74AXZulAjke4^?10PH8U zlUSoE!TGclN#-o9-!N{>`b}}{A!(!kb<)XC#KYP4bSa0Ay}#Hf=yK7T-cy5qcRE$t zyGu0jopW6%Fl#z{i|E__`>egUFDL*!{!g9?zQ=gJLNXp}glUSMJBW$PpO_V?DXrmx z$CoHL{=R_6w5-Dd1B)K;qszOCE0DB_mm1?Occ9tIPbkw|qQ!bY7J!7AcW#G?0AB>n z?~F}$<1*IbxG;7$v6Af&=|qfJx;PRTP3HJq zCNa!)!c6OCpkUb<`uIJX?jbOl=1T)5l2bP`3?hnLW?>uJDG?aJwXEpY;IjWg?&%;_!KbRwl?V0Xh zug5i2m4O$Bwd{?$C9VamnkCOnB%fg|b-6!(lXitvo$0V4irrIw-u9EoG9#{@H}LKm z=e*>mrTvp5_|v`%`#5_gpFVBEtsa=W?OH;bR5TXqqGxaUnjQFQSG4*(O+l;O-&>6y zBCD1yAsQMjem-{>*N=~E)HMG^*h65-3`%U5&Jt8maWjgS4y?~h!SvuD#HWuB5n7CL zQHE+TaN5;$igpjj-Svinr{~dP7Qy*sSteo&x`7MXva3G=vX&gzM0W)yQ)s@H2qYz>!7jmc!LeZ zl0goL`q)DpzaNs>lPxDAGZX)(yHC2`thJ_EC+b6kW?vna8T6MhJGoMs{m+Y10tb?BPBe`A5M6#GayRW584B`WchXUs&GUp+Z?b4D zz{%W~#KEhJ2EglSJ=n?*hH6j5J-o{Ab2QW=fL^ppcxJer+Xj~?s-Zdwe9wT;)cuC% z9vux{LQDG&$&(GaTCW=!GcGNY>l%2I`c4ze3fE%^CEw1l@0 zSI1?cb6?{b@I!4e~Ny~_&U3&xHRUj)dMy~jGzGj%p{NDzGL;7mbXRqqdQ2^lPJeq z|NZ%DNez+DE=HHDWAaBZnAhpW1I2pb$EVye=M(dj)Fa*Oiiq!chAwh>|H=bHu6;(i_m&_=)J&%vXVe`{*l5$lUm=z_?Zb z5C8iF>pwg@_R<$8rnvW>ahG_x>}L7xgm^)YUXXFCfC@=Nl9p(w6c=fqc;B6Zg?L9T z3azxgP}nkm?h@DcUmejANP;qc$bRy_m|f&d8*c44acjn7A=`;jt1IONXXOp9PTXp& zvv(_zz>!)cM(u|EQvvL?X55oU<@G?<%_ZmgKgHEHa#nvXnaIeaQs57F&~G(g|6RKN z{X$n7Uj}`QpuU*;E3nRb`EXF%eRVhG5PhlEeBUTiR|9ensjj?b27JO{eiAM>jA4V-O7eQs(=zv+ z78}7UtWQ|_KlzDsvAc-*zT@l22?QRf^yF3;krG)y_QHP8xfQ3M$3e!o^ffRTiH(of zgoPlVD;7Xt@qP1tR3^WO`G>)%o$q_u9IdUeLqy5G4g*#R#YHm(qYAN z!vq`mCU}G^#U?9nq#8GKpC;`PM&n=;%Ek)>{xQOUN^8Mck15~od)nqQKmllr`KGNH zW`PTKE)WoG7++!o6}ORb>RH5A#b)i7&%rf)6hAFxfEkUWS|=BWQ9J{`F;w!O0^ZSG z^3F;Kzh437T>nz{9*zaaFR8x3)GX%z&id5Xia#CrxjW%`2bI^W*t)0RCK)A#H8xlZ zTiRM!%@7k@gkY|I$Clcf^qoO*!%DXeU?av z;TJj^@MDd>3n4~G)P!_w$C-)*yOV5g=N(~e&T}9IbWz%0?I4T~6ET`SyiCRnlI@1z z1MMiYsh=?ug+F+NB+jiR?zTgKPRM+(ZgmJXLtdYf++{2^unB@>Ge?sC1MVa6~XU;HSpPnV@-~QQdtYb@}Bfx(1 ztV#)eqUY`fNseT`&XI6S)RhIpUD=qMj)tp)wsd#8??t{u;#?vv6qmRB1O|yPivkWWm^Fba}8kAi&aBmHv}d$?p*SRkN|`rE4)eULmq) zksN(?g7QQciK^4w4MGdIJpprI=&pE=fzfh@#p3a=%}-d-{EI{Vv+53ubk zHHe?m8YY(yD9Sl;mhjfz-GVt)I6hga+?#F{PnI%lPzX87G|28wG9soVu3p>iuJy~D ztzI5e!HezQ3l&r=51R^{eje$~ywXH7;>?eJa@gvM1_hN5AmWL7gf`Dk$aRdx>#Th3 z7QOjeTHh;KX(F~kej*L!-eR=#eSI}}N8yh%#MZwN^2CWY`b*-vMZX~sSP(9ca4v*W zoj-2RYlav~ob~&ktY~&JX!EvneE1M1Z`&6RueJb}K~58_>BJ;p*&Z4aS7%3x(+P*v zlK1(k_w%;7C7uY8s?$~d_F_(2T3YaK{>j9Ym>4<$NAVc1)$H%Zp*7Pu`%QgA!6P%) z4?exi-MRxQ(y4EBi%M#aw(&b;L4Fe(wG4yU3=f6^1l%*Sr#K&}RuyHxHIxB5)oUmq zQC!Aud&ODTsOP9|8pv^NM*^~cHnof#njX6gMeKrFxF=#9ys|5;h>wM1k{u=)ZXPkM 
zDvjJuG_GeeK?F+T_TxNndsED;X20v7jPvF}P;hj{%m!&=9eJ>q9U-Vn)sv{S*&80q z;=D`e_{3|tM#-`dOuS6z+u#X1^4nm_ZGPy|zdtrUIoG^1pci#xzDHKx3xz^mG<0S! zBdo0>jnUTA$ZO9&SPWgB!}xaU{pKZX32|rkXLhFN)s_)i*ri)<5Cw5BTzf1MuwK}= z0&33%zqPd!HG-PkK;x;}pYQ4Xs9>XYcLrJk7B zx&HFl?Wc~!yCNq321ircswW*xMm-%@-X>dZr`>qZW-De3(?mw=o-+FwjhG* zXsPYsEw0jd_G_0JdXJpn>8hv;=8uVxLt~2=HmV7{0i$Jg*l6SHKFrn?*5CZ}pLa#6 zlMy?Za|sF6zj*d9*OXQBU*EX?ji`4%lDzC0C{&B6rxOQT-NlPIE3elosoxjg%IRyg z{PU(%2-;+yA9Ip+KcN?;mNN42m8iKsN&O?lQjzK8jV!J7lB|r6_}?oy{sKzRc7?5JZ+jw7OHYr=(eKP~T}kR{3%d%?5L7yaXTgtK z#eh$kiL*;J-AzQ5V0N}l^c|}wu&t&D5Cp<%qFAYj{>)s#rJd8kij83|?SuV9`csLI zE{iOSErw+bK~p%!3s(wfb=>2nxQghC?2c-%p_Sha)~uqfDwJCM(>pcDQay0x%d3+= zZLR1LRwqSwUv%6MPimG41h-# zRePQNU(kp?vLde3pyMg+WP%Ty*wWah8N>!QcwYD$uRRfkU^Y&S?b5IO2;n}Wt3;UL7%fbrnluTX{*f0 zjjoZSDa7*!F%rOK}ztR&siDQr=I*?s}IG~XyR zVCgw!a} z**I&xC}&Mej}y($i3h|XT64fpKfn+c}*5XPF+AX<2{}ub5fDgauw4q`}ERS>*|%o=WG=W|cG@UaGmE|oj9 zzgGx6R*pz1J!jSunOq=Ewv!w?OM)-nlHALeY|jff_7Tesn&nm3!lU;03Me|0V~zx< z5R>f#eb)XJr*+OXhHMqgK_|dDh%)bunG_v+F3Gz`8MFTTA0wp5T*+vP%zRHJ;X7lM zZkCzhukbi2V(04YEHCnG)X8`(b?{#+6z~c)?pm2Q*Vsbb92QA@<1QlQ>ba{uQQuP^ z@_2i1V(8YR8yD$SHIdd;o{b(u;77N3_KBL%M=XWRhvO`J`NkAkq7#1L8?$nqpGj(eArwh5cnO3C<38gd z8-+Tz+BXUbBsbd_5A!0gMsdc z`iXiRm<48DdL|H&pa)WRUC*h;4UYV0zQ3!>$aXg?rAWjt8KPlw#?@P0tNQ0?7Cq|X zsINKYBSqZkO^V2$QqO3qv&3y(U5dE-+l+|`AlGQy?q#0MRtxp`JmoaE+?u|+KmFbZ zMq6}SEoeK_Q_90-Q>6jOBoL)DsJrrY?Mnc2D%+3wMm zXtegzKQWh#l(eWk$sl1TEh=l4Jj@;1w68qwzRvK|_Kk8plAX+zL(}EB{GHWpqpT~& zE>Anli2a?^xZeGgw9KlP3Xp!`+Y-sDb6)5o^f2cu|Ln)uHo3npjp4IrOV1s}(#R@q zHatV?gK5TbYzIGXFj1Wa(;SP}8^j~pm{xF?skR!eAQOL$6_2(b*$v>-uB@{{vX`{T zJDY4hGR6LW)9XQp#aLi7^?p>#Q0|}4<@^Utv}F1 zI#(d=IqlxUw_LO~yAZJd?N2%}kA3|=fBy6sRq&DR6CL9!&t6Q`R9OTK%`+q}VBz|_S1f#_+%Lu> zy_XpySq(;A=$~!xU-f!+P+yL;a4w&-GsS50`nb|dHnGo0T$tV-4?$xCx?z3i4+oC9 z*W07if-dsW^uTAj!c|11(=rW|)?|;|pkD2v0SitI{%OiuaQY%W4R) zD+1Z^H!Y@j*yWMy+(EyQe%GVbNbBr5U=Z_pY3XuoRm$}8||Ee#4(e;HBxW8Lk z8H*Fg6%9$Rf63t87b@={-C)L4lQ%cKukgk1{*gFiP>6p&4+jTT?XPPke8pSLWPcJL zxUnCN5bbH-PDt&?Zp!z%(e}fm8--2qfmhx;`V9kqVaPFDi`p|fxYNGEeM&%rR2^$X zVB7%0RQMT1_1_a9{Z~Dmj+sX1ogw`z)?2cRDp6~?<^pBPV^nuC>&AJE+Ha72&IBbS z;XnoCm&uKa?fnsg%Cp*w)W3$eiM%#7vOCz1E;zGPkn5BIZm}aGHt)-w&zbJj_m}1< z9!|e@#*!CiF-`FXX#bg+?zv&7>>cc5ijA#i@7)m?jhvB@%j*zjl58q$We-K#MxQAP z_sL}$ypZIuVHh~uVijSGk9)ql6db7ij~5_cxqodhO-ZXDLvy1xC|po-u2EaY?=W<~ zJ05X6avgBNcYVxzg+A6J)HKv?=NzCSc6e`k;k?)l+YEWbx8``}oY?t3iQIt1`6>nW z`{Qc&narI=e;c*OuG%Ya#bik1`=J}BI7GpTl0DE zPMg?PQ^U@+jfdgo?cUzm55M%@8V}dS4Bgg)Z!7&rz=7#tVY}fW=Nn$FhYbauhLR_~RkQxMbox3EEKg$WKXQu_(xGJ-p6C zw~m;O)OUs};uCu|qgDG3Ygfv|IX;RcG>DOMG{)8IH&6Vs9o!C9o1a1Zs9&gk1o+fR`A>wK0&P4dC<~yZirBkW~wq4q?eX@ORr3*4;6YD zl2-f$q7BNGfeF54fIljdN4vlqiNN8HH1IG03V+tOR2`vhEp}?nLSBZ+$>}EKvf>;~ZQO%t=7UX3huNaZPvT zzT3}3S0mmtH(QvG12>aC2n6AL#>eN_qaMf?xxnlScf!`eeU8>0cYK~C?bMVJ;9$uPH=X@*1 zKNmJ`$V`zVy6=V`b z&&*+#JQkCjeZ};a_%+p2W~>@kRofh18R{M$!1MK|5{WTU_!iMH#lG#Ntug_p)Q)G} zqAxYHS6+RNCm-b~{CfH~iv1yaL`=2lMd>Si|>h~r1sl?24 zS34o~a#eLoewMK}N;c25eJe522MivhJ!G)_vC6}cqX1kV^V?TVlj3pOIip_YT!g>% zeeY;1Pm%=YCTKcmb!r}V%BxW(?xH@j-;qH)UfI$8j4N?!sT{cY8}3`4K^^@fx=srj z3zQN-p%sZ^dJ#?7(5z#OyDC%cfw#IX}P;k=%_w_Q8%QoAFZ%Z zVo9q0brpN_DXO?^XwycC#9!1PrQ5E&Og)p0YFa-U5^g4syz-` z^zVkrs~@Z@Md$@5>OX^#hr-MbcW|cODrfTb$vp&>t`-H}gk}d%?n#CX5KGv~5Fv)lepRGhIp42TZxCe4dk9doBd|qU_^j;`?1V zIw<=%EKNAEY7QpYT$j@P$Y8DynjTZwJC@;ePbYo(W=r@EG&F@@%6 zv*f7c(Ji&-s-Tu3LfpoqJc412D||Ymp?CG?1>AMTlt_FB>9rqwq%gAl*m+)*s<%jc zECOWTomcS#I7*+6Dok+;4M;7_=MHGTQ)8Tgj$REjs{5+eq=9ufmAsy1B`Sz5pIKkb zT^R&~IgWdSYWcC@31%g$M?WO$*ZP()^l&4JB z?lY-SFBMxxDW(*MF3RT zn>g4q8*snr0sK*;>YtgGNdp<&-CA?8^&&{;&+6hQD|zVrBk-+)q7hlQNyCu-ld^xl 
z{CBxHo>Vs7`ep3ADrHO|Nst&LQV90(JDiN`cpY*r=%YZ?x;ekMZp`#xiRl11>vc!m z5Cw|g1G+ZJ@G&|7Ujd(a0&DW{Fml3yUAkQeGNsS-5hA}%;wUgkQX{axbOAE4)(T3r zd}DiNEQot^zY>B`zet`+EWT1Rc<%9oBu ztZDFrKVi#mka<~wK~%f7m0eHIg@6O2D9T1IP%62bK80XBqn!mSCYYkjJtxQ4f{Sh) zhk-^~{`E=OQD6^-S|HFR=7^yazX{hAD{}y_y$`*Q%F_-klzu18?nFwm?=4+dX2*QG zs7}MDNzwJe#mU0`Zz|SZhOik$6)E%QTZ}g{QMr@fP?4-WnvO%if?8)Ss%Qga^v$DV z_4x2y(6M>jr4e1rw!j#7+24Q-LPlz0V?a^M$F3rbiyfHkbcctz(>J07J8ZCysVmE0 zZ``g&snehwqw(LV7y~ylU9y9KvP9A@5eC9J$`j;ah)ot2kw!;iTM{zTOxsO5dtxel|iQQ^`O1R1V=27-|Z!TFHMyzxnA0SEvBg8HVBxp&pY=kV= zbYbH>RilB6yI{ZqD4*AU?{YXu3M;exB}Bzc-*o6I-6m~N5>7?RPTZdZI)?HjNXkGz((UTR`up`0r^CV4txJo%K+mm<1+-e4USejx24W90>rH?4Dx<4E14#cxU zy%dzG{!@)NXqcLgBQ&7mO5?kA~4yFJ>%B8a@YEc_=qwD)P zSQU98tol3-_Nyech;E#eyzfbzeOL)OP!TP&dHL5WkA2~b=#|YYrVz<{c!S-nZ|u>F zv;m*^;z(OR+B2&&N)SCXwS2U5g~?i}^;Q>@J649XMh?N8Q{<1rrrGQ3V$lv`Q{{Y)!ZY{Bk7|y zyOZp1^4#H#iJaS6?1Y9^{i4{^s}4>``e70qVpIXj#zh%nenD;A_#O$Z{LmIc_PJiK zhpijR%I#xS(9;MuxQyC0y*2Ep@IIL^o-uyd{Az9efG!vBFwJx?d{^zJ1qDx%7nq_6 z3$v*g8ax!CXg2nbgTKBlU6rUl>tCC_CrN)j740kMy}AifdcMgd1uVT7ME!R%)dfk# z?m6PQK3vG%>$>=}l^;aDw%~Zx_kg%Z?aeH8qqM6^o~TAa={McGf# zFJ}aZrbup*fOg%7hClY@_X;h-iZ|>G@V{N_@FoS1Mp?vxo@yda&P7oBwd?Ret%*fv zGvpuPUfzylD(Bzh_drXS?b@)OF>S`hrOJvBAN4kYInFIWDu$Q_w}@NBX~Vh)$wHHZ z%Y|hhL%cdz+s_p1>*ZotK&$)r@y7 zoJ?HH4B~kG?@ff&oVritx^|N-R+QDvv!B(9F+3O${Knt!_n{zpRg%ntVZ&rl&Rmrs z{8pU7(Tll}?d6I905f@QJJdUsH(qR{{RR1>gyDX~o} zI>n!*jd9Hf9(!v-YkY9D0^71FijFhyapZS7066I72~NE4vKZFGz?@$~XngLXr-$$8 z`v&4|!fVu`){h%Zhe9w#+$Qs&ft-spra^xTzFmynU=PO)er6#Glk^5jkAax~xe^-b zlFS@sN<@QI2N=M&LWV6HaoObFTO5N$7U9?1D>#!EYW9nD~$d$fr!GVZQBx7`eUB-FI1y`6ThA%nlso|U|A zM}o;ovv$Z%ETa`f3&26OiQJq|4#=&a+B!sD`fYZ z5MyB3^U%8+elF;Lot;KxO|H}aK-cqq>iO3%mnH7qfh|YJDMlEMz6%R$Kf<=y&EV;B z>I0kWAFDFh@Qz=>`}~Lq$oCyONa^qmg3JH<-V-fA4_@cJ1BPk7a1K`=hUvXZ;Wnn9 ztw;wlP5eC*rK>Hk;Ma2glJ^|<9i@b^Vl4#*&~sNAB5ZNYk2@`@aQxW zA3?q_?3)NRSMNa(qZLhCI}Fw0wBWh!%bq;f!|3Fp?=bi)E2V|13FlLT%Gd(e0LDGW zEQY&yVq% z_N}SvZ6%kltob$141SB9{=Iexx4y+|3KHZESMOM7FMQN!>v^TIWK*_&FOwpI~ zGmqobAJ(n#7c4CaODnBRZDV>AxiTigG^du{t5%pZ8xdAA8) zY8S2-EOIto^d6DAQ=I3fd-dP#dFNvSu7eMhvvj$$4>c)WeQUVODHO!G9d;XuKuQ3b*A^B|5Tx58GkNa^l zvAzt%bMd4>V-v8xv%CfLT9{$Mo-l|}mojG4ARY#-z{_pk=q=p?A8zC0Xt3_|M6M(kT6bapW~H$MO&G%kr0nR&mk@bj-M}t}lvrt#PB6cr2VmY^ zc_&9U*l%_djktMq3IBHLwib@9qp`t@7 z(>$2QECLuCS+=!b9<7i`mz1Q50;T}D_)2&Tjw>R;_cV*RE|||Oib`OATi4*B&&HWw zVoKvQUYr`NN*WIDv2d_#0)8G#6%FVb^d17jfX# zA*;cAT=S6;ZU%n?7M|k3Q2qR|;*e8;P3EiTBD(a@X1(d!Z2IVG5HciNOe_&ZlT@F- z*YCTTjmar#@IKr`8nxC$Q^Rj^lOwhFrS0WQX6gE+bjBfy-7?K|xeexU4;@!lPoDzl zQ=@nsN#;F%hT40amld*IK40pMuLW$se3MUU5DneXH<1cr&JoN?wC6o`edNEKJd2>k ztSKi*^IE1OW7lwC1R#nB{%+@LB;@ZeEv1ZlHTq2?Z6l)j~G)KsVhCOScd-J z&JuEEx`1hMoKKhUc@;%#jxOjO4**T0c!Y;3oit~!z6pYLN-miX+SRdk zk?ZoLgZp^q5|^w@kGu$|3HWWoh6`hCl_dXp`{U#ga$1zvd`%*$;zd^0><^5021yCs?TLR{^4gStLd1I0C z_#PihCXI@3>?D-$?B2=qg83N(*t>}*Y-9^+h!G)iq4H}c1cF&vSwP40?CsspD#kFqbyoW@j+5{>3s55N z)sL@CUsnn=Jzg3@m(s|JpIEh~FO4uAbFD=|uiebq-+Z-8n5p}CW!^DMY4F@dO}Alb zZe{JB?eA|*&%MiB7FqpQs8|hH$Y}&1AYJmKj5nWW zG7kfKst}+b&&b3>L|9L*qcduX(dgA9b^ZYdFzJGY|PybJXLaZ{3E&B*`y={iRf^xC> zD>~ttGpjaCG?Ju!143D!>SXTK^&|1+lxe{E=U8SD4GC=y6iLLs_S{@0AI$&GknG`m zYAR<&$Evm|8yGubQq4|wYX#LMmnl3IvkIl-wPCD?HHiY_G6^WyLWKd$g$YmxO-yWn z4M6(o57~&4%6OB~n;a+bmV)a5HMq8yVi-EEHPH%^#roBq_c)`ss1jptdBYBWu7 zSe=ny)x2g6CJ&>NmjKt9mBLWq2qe$py3_a!oSa79(8IQv)&_`c9P!m27$}Hk*D_td z9B~4?iD;a|b$CHr^rbKoZa}5+zNt3oq*m;&Y4P}BXP%0CPfc;-?-W;NFN{1@s^)To z?@BKypkGoh%PIm3g}sIFL6_hDnSDhhW`G0eFOsR=PHh(#ANlVCutNh@ORf~E=2)2( zKR)9Lr5^Sm?9GlD>X9JZ1iNEmWR2Q7We~lkCvidt&^>!6s{moqtU0aEWY% 
z8&o}kZ5p|0dX8HGgPnD1Hs&a&#v#-YME{ZPkm>$F;(|5povLG@`y>9s#%aMJ4sJk!?^%KDD-eK=nzEd}*#O1K;VO>D7g4Jsz28QQo zdvZJc#3Q9G?2K|IWH_EqzFE-hwvA~a+{)eZCJOPR-&)hh)q{Zv7&c~H)O&-FV%crC z>h|{j_du$UBcjbZz*6=M8g*VO!;+|f)#j!ru?Uu8fKRILOqXD5;h9SxJdL%{a zRi8kQDP0!3)#Q%##XBC55->)P#sG=Ds+X^vc@k-%+W-r^GelocN7-V!;6$2s>(Q*d zdCF;#E&YR5sfho^k8e`1Vb34p49_nk%Fs>_ec8s%`}Px!aK<~_M+p+$*L}n37mh=) zM#^IMjiKx?IL=Jq7IJ2`i|1tpS&s}aVM9Rr-6T>vGfu(PRQwA>AE(Z4X`PqXzYUvz zBiC9HuE@1yDOHy`aVmBH*xa;5=)#dVm*{Wy>D>8#NYg{0wkrJ5sxetO!aZz%LET1C zo{z!^kwT$F7K7k(wF;}e`r4r8I}`^f{!GK_K2dk9arS6ww9?F!o8CEgn}(ly2!@4B zz9Xi6`W7zoF#Popp&Mt4@B2JR{y0QFVyqjvc3FKkr9W6}XNrdqycbRmd{1=tNFV1c z-kRritZP~@gNs-Yu@PHP6j0F^#Ywu;>HFSJk#afuKY}3-VM@n6MLpXqv zB3F$LvVMDNbc#BPG>F`;4&G0bQj}mz-OE}wObSWj05iKSK%Q>A$Gf`sEbMc#urC$v zWm*J=Dk_o}8x3zlIkeqY+Ifs*&z)EAr>s*5=7i%52N$eb zp*_2042R9;|Iz|HQ0NPXgb*`L1DEkG)ie68bUbQQI3&d#SBG#Qg%3kuP?b)fhJ3Nc zQ>L)K0KAO}X^x!pcRe2CgDgd=z2)h?V2?wzQc&-`%;NQWh)}TGS`Q>X-gXB=EIqK) zMVzUmq5@5d(Pt>0N*a-&h!&~x@B{7Fscvn}4GkR+UX1}QS$L77*2~_w|NY5Jx^r(1K!7OYfrjwTtxU;E^qtW5&Pw}#Z7XzN0n|+O zDJ51fClSk8ec52mP2==T(mh^P#~0Bm%@e!rS}HkIl2ZT5l0y-9|s3K;X^A+ zncR?HK2^|1x!3y5!-*v1qTx=Gzn)@zy|D#t&H7E_$)h03d355F)u>VMod(VBkU^v zpb~AHv<6qy=qYeCgsTSdj5ix3r}}`{&a7>e`~|^=;X~NEG}(Z^qZWn}gn_{u(E>n) z&SJr&wet)Gb2j!fGHCMIme>WxG-n%zSDH@^0X?S?28fvv18i8U#t2XiWuMO$o2GXb zY)Ea23lU9q2@^G%OZ%vmv$z!!K@B)8ln+{0$$T2bAlyM9%{6Pt-Nodb47D%7ln0nx z;1$pguzO>BV!f;7X4XF^#u$^fmX%GP0@O4OBoMkM8vrnK`7zVk01CAD4~TiJ8@&{r)FKQweY{Pm^k4)f1p_dz7m(&|;`c@J2h#mliC*nFpGHSmAq9LA#)|HmvQg zd6zX6MH!ehKj|gKtvj6(<&EWBeSL60jzDBEv{X&;khJH2GLA@ey zzlq<@O!*Q^2^|^-^y+M=WvlE;+`PMkr5+mqlsHO>-~}yh0V51v)3Vre*kjEaVWe)8 z-yUPgMJW8FhaK_qe?>9#f8u)~dE<}NKmReOGaMR#dWPGQ5}Q8NE7@9ONzRBK`^Ign z4wsgvB+!dFmQ+Q}56*DdZx->cn_pU~h|CG|ea@+G(Ir;|C(2U|=g<0uK<=rU&VV1c zuAE2`5;g+0P;GrE1EwPjmi`!$WQ@DyDBrX2?fSj@emvW`UO|!8jf%gg@BY;!{UfM7 zBm4yQTd<_{*_Wpy6#&V$G99M73W=}iuRy9lj|JGIb0o8Qt0`>aHKZX3j)CeDC|ZBo zzPQ>^xIu%i^bWYp$@fTZ>qF)azdIm6<>U+C%5Kk_sI9qePPUjPbEN=g!BTwxUc5|}W<6zmndo54J-2ZZ5^G$J)odRSr z_;J>SLloy|19{R)cAD~HBfhh|Q(LvI!#z$%%t^&}fnJ6#rlt3HNe3TMqlBiarOyqM zSAllU14_9~Ye|8v-$VlLzLI`EK8Y6|+js(#UZoWmvra!9(8;$@r<*0=ApA%slmGCo z%}e<9lWTMB7F8rw?z~AY$8S;JUsIRcK zG>D!h-9SGPjgkm|odo&pEtsq*Z@z^Mdwfh0xrI!fb9t|&5x(&<-j!672maO58X`lG zpm=)$Ip46jx~T|sdfQkLrS>JE+fNEE=r=Y#o7n}UC!0TNlhvRvKXG! zc-Dx3fE-7FI3fbI1Wikg-8{V-15nN&&H3o#6LWR+XU8SF7#Z|-SoZ3z+uA$XdsP@; z)hczy{KyPLpp)!eW#{6@ig&Kba<`-dla^iEx#1VXBJC~KnxPXdjqkQc>7>sSC4)V~ zpAGhAKl6JzF8Nm3H6`;0#Lp{@d`)fGgJp7iGvvDvq5)omosVhWK=PsQc>TLhlu9Z& zGI;nPN}CN-%2oa&%zBh5?fMwJJ{O0t%y$8?WBe=3mtth`8!puOognt=M_(r;9k8m9%?*-<>IM{aEJBMD#V! 
z>_7h+|M=&j{HmBOEiGPmTBnIR;eUt-4zL+GWB}`ACIA<(!%`VoimmjOj)D*U7&uU& zl+X<7s9cN#e9#8nYgi{!^BWJ|KKy32+axF-qP%b%v?E=kEq+OoLhsQbg|-lJHQ_W+ z1^!VZppFK=6)nMlDVQs?2F=LNLxFvSQ7e7BD^l z9Jpvqf5s}nM3$8T6|1B|J#p;~{a=i}0{6Hc%?pt3JP)-H7sZOYdHsuU(Ts_RK+Tsb z{?wrvewzQy!G^Dak&a+f0F$)JkbMj=8nFd1vU!*fl@harOX*tdiA~c~#DVZqv5zqY zCs@J!3AB%s3>>X)uy1IG(*f7|Ke>DuvN5(h82?It#4{y=0^Z{|jx&R6L+?6S)wMRj zx)&TuqLgRTxb#HuX-ev1ZTGPVYOX?qpPHqOH^ge@7p6x|7Kx0A&#~4&rP>S6EYd3T zt4NgRUqi*^$Zgmdr#f|ZpByE+aN=XMx69ZX}`gk=mv+J~L;okwjv^u0LtCU!yQ3CP_819?J= zfqOg3KtM8WcVw4`iB0=x?3=v3rgWv-b{j5VqEvjE{G3b^8>P24HFlL4h6p)5|2N#5 zBE87X%*LB!rsgM)?U|aI$kz_~wjJ6|rb09wTZ4kGQ-3am1&1HY`#xj1Ax)Yz)EU}> zgh>^cbaK_9`uh^~%d}H2$`VtOZiBhyTEb#oFd5f{hm`6Q2yrk);Sz-#A+NxRqn5ywN@0_kS}u1^+!7EycFd z#9Xir4;{}Dlb`|ia@~kutNTBfiA@@u3}{?gv<7jhmQfFve6>WnyXp-HiKlB6VKc_( z!^9mfDi6l%ZH`LqNH}k4VaTe5D!K_=2OYF3*8WDrKagAf!-RPEFPrgv83`7%hNr)I zEg4?xvqEm6Kc6Vt{3w~*?gZXA6EjPR<6bW_hT@SXOmg<5lR{b5hq2*ft%YA)ARh5M zZ-iWxl*Rh(<~UW)HPX@uS<|LobJ2R0>1FJVlE>~8{(hR9Bm6;k_8fZ#@?Wns=bI#ubDJM6MMQ1 zB58TPuXRL*Ij(~@B|YVh7ME*wxyXf+!HYQ(skDUWju2k!Huid=uBC_NHvhpHFbGHW zF~__!V_f`kf-;RsKPO+Up~G+10CuiOhUti38BJtCSaa9tLITWjgPrJFn zNEDPBB{j1_F1Vy>*q8&E9DZtb?^(5JkyTy(P08Tz*6sOHS@TQTMzQqn11?rwR*EG= z`MlcJ=%)zscYHfyw^3Gs(KSf@D*H5M)&xj#@H5sKliJGr+G-K@=^Wu6PV*yG|G~5z z6h$ys1nEfX@p#lJp>?B`@2nG?Y3&Q+vWQ3acxUKT+Qm3QP8`GlohV{tFxJX5RJqmO z3(Evf?kiVZ^oX37k0b(PLH?>bDNedO5AFo=f_uqn#|i^XYfbfpY|O zadLW_&rv3~-#TYP-(~ToolY8FeC)Gk_d_OY=kOB~DE_=vwf0?-ra6^6743Y&KGLZk z_-QqU&i^)VaHh*Gu-VrmDLJj!j%*kq*>06c2TvLUGMAHl_GKEc%X{&!vdxt8U!b9f zL%N-RJm4p#z<3>56w+z;>ettGT4PAy;Q8Y|Z|(nh`u_JEs9u+VEqV=OZ zQB>d)`k7(igSmyZPWBA20MG+76jA{2oz2^KO=Md@el;fdnzCZI=v%Q=2o9%b!BW*q zwA7hMTIkvR?aT_~PIF3q38(|V*`eCxoF;)18{kf2EbY0435`*xWwA6hRaFd7)wyi zGGLSA81NY*Pla2w3A3<^)VZK3oYaAhQe^NBY`Q5roeKhghgsMQH`t+)mn?eto!YQW zamdE$Q_bxSiH-ruIFGR%!Cim=Xs-z7S;YL;c_%6MFoH5b?G+9Hw?G;VNViuDTlPx1 zD%81IDCn)ln;Zi z@yo*mLaW^Cy<*@&iC^0WOgpntiY7voOZ*n<^oKJfnJ7(6cP9%Gqb?d(#`~ccZ2{`D3Rh zB&3o}h>)k!ImTXX5)8UydrX*WDxC3STwSKBK#F6yAn<2H4W|m55c9?Cr|J(FXNS(@ z&%|22LOK0eN9>xkyIKPVKL&l>x;GWuULC8C3+cPNZuFf&1wKGRWwp`c_t(Z7GKX1E z%DWf1KUrrXNa~++HH|GV+`BwYG9gedsTtOH>em*L1Wx(SI41j!1u|FGroA|E-AVrD*>Z&1eh1^VvEd&QjL!fhhleaiiL4Q_Qi>rr7Xk>CRQARVXC+ zZqVB9YiFu4f&|8{zf!;nIon;_2Fdplhp?!Bs7-S1hbY}Etey$<#oZlj;Z2w|0{u^q z<4;jnx3=@bVHCBswIYYz3&W=MU3@q2x`g>Rc110f^0*|-nILvJH)F;`;EV9Ecdct_ zvx`MHJHibc!v?TdPa4YvZM6sM_QVk5J}81P-0;Ya4~d2d(V=9}cRbsRuBi-|G<767 zW0qT`9FQ|9yFT6mCJ@(b$UfdUH}u5<==Qt)Mt_* z+}pN~6mi|>AV_3R#Q$UuVr#3PAaoK=dt*@2T6@eKwJ6tmpQ8q-gfY= z?1E_fE<(BETo?7#U0f!m2O%N_27a|qj&L}r*Iwu6jFWvvSnntc-;E1hGv%(M){L2| zV?P-qLQq`Z%2a>p^OaaMMjZT>!r3*#DEl%&ksj6Ih^QbM)%00&yXGvspUm+m=Hfi`HP#&h?#SuWq5IH~?N1lNDGBTT zKF3pi(TqvbT>hL6FDf$BTiTI;41{$_)-i$r@zgADlOsL$u{UN+R z_X)J)XAht0;I{$y*MTwJk~DTwE&m_Y!}v4y@87?-h5~wd*U!rz zviZ-S?Qcr~oE7TySN=``j1>B6oWK(3@Bj~;>qe7K-?y@X8#N{}4_PATt7AnE2m0Fz z`@iWzhN`y*ekpA^`)Og0q`Ie$0_>OlO4xO&V+Et=X7DQrv7`Gi@iAgz&A`gaGyoQW zajWomDsNm0@)v2FnBX5@!9W{MJsMpxLo~H6!W_`#kI~Vj9dW9lup^?(xcTJFNJbye zfsH(t@ELaWXfO`hXW*u6i*fD>t)^t~voV53g0umz+~odrQ=LqCtY$2|xippxK4qBQ z%{)sX%|RTteXoMcaB9gm!E0@^8X6~VmO~oT^ z*g~bX`<&1K!)4-2xMe2fJk==;DV%G*A+Jizg?04g9sSv{46Nx+m+4LLbtiFwj{DgA zZ?-*`IE#2CN9oA~G=O@``Dp}s!a+pP$8-n|HvpdOL(YWxipVjw+Nov^yiY!wI*6WH zt~<(|r1fTOVl-trk#k2}CoiytgkU`V<7_NVqT7~|D1)OyhJU+rdr-HD%f>SD+DJBk zp{|9tLhUClM>-Vt!>Xh>(%Sb4#(5e)IbF41u`nhdoY%5>Dk;h081F)DY5953UD{e| zd$SJDkozCs@(KeR61=SOM04Q7h)KKWn?%{eAK76@8QW!?`Z$rhs_pNTcsy&L4D0io z!?5_@Q3Tb`y|rr*K6vAmN4UmDWG60uT7NXjLhsSNmE^Sz2@`i|6HJ-uk14BS+%^0+ zE%6_*bfA>RW^_O|r{h!sa{{Y1!%^q0Y>0sU8(wO8jF0jwsjNdZPNYeOPue_MDUlpi 
zv|p17bc#GtoL!#;2U!HGTIHL$SNvVkKGKJcwlChGkg3VQ<3FrJQ(^G06|8cCxG9qC zB*$yiepgAazlH|og~30(mhgX>$hw$%Y$o0tKjxQ=eLCsZh?7lpc!^5s~QLWSVn;qQB7rojd7?~S;((zOJ&FAQ-6&Ol(siGrr9ML z1@$?G8(n=E`8(J+s0Szvt6{^72ohI&c_MlaQFe@Tdh&)=4&CRD-g;FPhPfx^@@k4~ zv#r0iQI(O$AK0AoPz}DH90K4eu;|butI*HZzxqCx2ffYGa-T#ANJDuqW$&I!U<0YU zkqRJS=O=t;YTJ*kp#{l=vZ)SvXiVr^xgbqYOfyef_=yak)`ewvQ!BJBwdq#D(>eXA%yQf+_l0iat&(Moh_dR`sYtHcP&8gD&T{qpW zIEqtWeV2k=#JIsf_e4z1Avqz9(H3UPiUe;}{oUe`)&4_e89GCADRN99Xg@;hFFV`I z_aigqPT}3iW6I(VI}isaL$%XEotz>mGW}9#FwnxyCR<&-d12->{zvRhfGms{ z1@TRB@H}}Wcq$$i2wGR#Nc*7@@+mJp&x|c^^BTaV}dkZmiX> zdxQrsfBy~QK>gSa!c@hm0jM#Fe<5w;eI!3l_9H0nDkJZvI1XFY1rzuO z*k_xYHI%J!gvNh7c1mBNoC5kpbp3=@S}S~fjVW~?Ucj?NS>c%|ebBP%O*!e_Rv-aw z5WYQN@g(1u>XqRnibi2hf}<1)M#?TDnJTB~(0c)DPfTU3a`Fbii7%S-X2GTy@&wj3YVrf6dThPM4lV> zWS@%rhg8b!QPX8iTyw)X<%!8-!hN#$=d2GRETOhRiS+W%4A&ml|Ida5I%bL~u^wo* z>T?=;o@A;~)cNJfWTnC9=Z^PDd=1AEBdo5%t-P~1jGyxl#Aut8U*{X?;V`aVsATZK z8CG&gqp14*okGe>h^9Q&nBNry3xuJ5>I|#XT2ilw*Cf_W3VQ9S)F<@hE)48^6;6bw z@yeQih?9#R_!iS)AE*s`)7OY+h~ZfOHD+l8B{M*8_cWGcBt{3?dkxH`o{6*M{}!E} z$LDcnFGGnDe+n&gAf*}RrP&dek9*G#FG*k?XLYpuJ3%&qutyupD7uVSP%QVY@Rm}S z98S!UoW387Z>yzm-Fx@#8uCZv48#0o9XC*kxAAt%IXTz5*r%#1o(=)O*bTu*IaPx->kVtl0%A3u zQa{Z7iac|?BP+L%*D*XG0R~_Tu+BQ+G>(BQyRH<1?FSaYH4y57v*dwmE@iTTqh_);g2;`HY0)KIJ! z?_kNB+QYjqmQ6w*XFb2$NT36S^;?L9c18U~qj>4~LmT@n=ZAXMefCt==k-|OmCwZ; zvvD5Gy*Vv?Ohek~!zuo3|D8Q-tQxN zm|vt)ea*FtG)4<0+e(bWtpC-S2$!PMyTy3#o#!6{zmfGmlnD>E-xpi^nmh>EE-o?8 zZDncz;d+x_kGuY-xJLKYKTJMg?nPX7kH1x-^I_};B-k!*DeT6hv28g}lxfoQEfn#OP^&0H^|ynx^B z%7C=UF7canC$_!8!j$akO4M&mkrgXQFz;gX~9`YUs7xca$(rXY2ccg@725?`}e=_(YNRsN6OxAVU6RA&2_oWBMgp-xPsW zVVF(crD7pJ&Q`F>v-#br6N%3K*xa{nLE*vSCRp#BQw}#QF1y^@q^<@+H|{J@e^oc) z_#alBij3VXnM|)dL1$|VDt2~upS$kt@~1qezG0kVTwE1gepLX={rYN9I(7c!p$oJB zxl>Zyf+c(z0af*g>tW0sCMaGm?$F_${hJ%)msR`UkJtaAujpacdlIa6{O--}2=;H5 zc$6%W(kJlpiuwNI)InLUd%WrODx-DZe%wK&o2KCha+yWl;~=>2FP}yoq!Nf5USm(( zOWqfM$1~DTo}_l8CK%?}))kXKeEsXTv2@Wu2YBa;y8P4O6?it{wR6zee!&QdAH3cm zuMfYK*?x3-ulo%CF5zOidiMMPB?-@$Pv2OQX&b4t@45(B{6ispX-Z~<%KJ#f@UWw*CbrZV!JK5g*nD`y5ib)cUFhfRevUY%a{;$usqAGoB^H z+Z`lq)7HwcuH2jrGRLV~UybyYkzNi;+As{g(V3>d&kH*i35nO9;T?GHCJXW1V^JDeGULhr2Y^UcBKXJm8;HjH-bd!#AttQj zX6{~gAGn62X+u}Q5Nm;@n;%~v<;{u6$p;*&rYN0f==1&O4(gv*E@HvddoDVU!MgW2 zT{z?$I$bg(9O#Zb-r!Xrt|aordxm2)+Jp_m{PXhXaV+p|vAj4K_FmF^$5S)=8TV(# zqpj6B(ar5_Vkqk-S!}rr+4Q5p1WE7tO=1en2XKdiIN(bmMiB)?+<+UP&egtGI>B7trgr@-vK=*&l>YH|TE)TAcy1$|B>m1@c#CH$JnBSj#r1y;tJ;4AQ}w|fFS zukaTW+B!6!EI)_kh85(1n_CX4{Q#LrIL3CxR=HH?#Wrt_s3?^N-08g~A#>NlVF2$W zlZb?YL#uwwr(WSl5H`38V$*;p_OuJY-oeG+F##KawK4lRTJgmcIBKaq_LA=sP>FL4 zXuT4al#W3%gIB#5fo~IuNq~22=*A6(BdvheSshGeqH5|E!NVDsWFKy{*j`BE>n%&o0m>Q^hXD5 z@mr7aBOj6BsFc1+w^B-bZ=k0(rPXmQ0qL32wz(eXoHx$rCf8`WFJ=KzZly|j_V~&Uypy$`uSqu`BZL{k|JIzG$aK13-3?d zEo%X_29021!X&~KgxJ~Mp6=hsBoSu%(ZGJghsQ-?w#dFBf#*{i(wbr)2y-_dBq9w} z&qG{4uwc=wv+DC`#j;1`QZO*yJrEi~t0Mk=fNi*rLiv>SvJbGU`lav-+5uy0)OGlu zh5MB4zB{xe)Y>{M$RcEHZHD3LW2EyS-WFp*qXj5*gjOgc3Td-NC(qu5_dsX*{llmC zh=Hly@C*%2_(8m%NWik2*OJa;GErYZaQ7u_+pgO3Iw>tm#&(s)d^n;@OTD*F1bKkk zHE47-74SkSwzVNkiT;PzacuYP78Mbga9d*x1w2U=xW(7>q0NoPXLM6x?DJNtUEX zzKm3NF<#2uACcIXx!9L`H@RnI#=!AhU6NcQVI_5Xz~XY#wDGF-P6Af@6ZK22-%5V`EW=PmyB zPR9179wWdlqRvZsd%kVPtThuXWgdiPRhg`$u>fTo{f0?g!_TafoFpEmvPKOq&5-vX zb=;pLT)DLqrxabOrOk-~f<_wr>MxdbY}x$>S&kJ?+jvT)rIEj6;hCcz$BLR2gw1sI z28v(96Q7hd$MmdpEfVCueywBi(xX>`Ola_K=hcZc-p=)0{lE(@#gJ?gM<6JC{PVp+ zGwJfPFyvkRz@3B@`EAe6Pp`kXIc7a

AK(e?N&t!NIt?&&6F>o0}ugK25cm56i^QLUT2#$+NyvM zL{39+G?y-sVKuEunA{M&iq8u*W@FIR%oI|fVv8NXut5j7jLzBuf^a)9v!ffZ0H;qmJb(hfj;UryREM&j5ajx*V z#Jp332r8M#*fLEexSkpgf}3}|)UegK6M~vPct6H^DwMP8O6WBsJ(rF}QV6``u{Ge| z`as#_z9f_*OA1pZ;UI!awcISrKm4EgWi0hyQK>9*N$BlRSoJgdYTq|Q$jduhxE|}Nu8(zDr*y&O6@6k)lY>cs|a(nJ4i(j$0uPc0a>0<=oYSJc$u%?-@KX!jt zmzp|OwvTI?2fXm#JX%cHC(QZ5#N^rFWL#NdqZ#t!OJvWuQH_MTt^P5=S;6w{GwLg{ zKL&XE6VnmH7~4LBGWI_VCJNb|*?|mS4a96xNJN=%*ILzHfg&i)E= z_J$u`d0meWZzaDIPPnr=N4ps&x9vu0wkaDtbSe&PT=78ni{;D1H(_?yLq1x|F0Wj# zBbBz&tr0JMg*$;|+681$UAI6H*PqJ;*}e9Tu3rz0(04%Sf8GI3dRY5w7he%E@A{f% z29rI)E^@cy(ZueMKRv>RGP|Tz0%O;*p5Jl3$_10Ne{wyL1%Q-rH0bt#lWB~_1$FisdT zcKMjaGX?oE{XxS$BW_R#8dvQ8o)r-Hjxw&kvf-a=AYo1~ z!>ut;zr&PCMYytu)AD-P%o48ut?B8%b6lVJ7-(hZyy<^4w^lYrYBc42y+{V6`>SP!`?OY z%_b|Dj+`)+6s44^D5z|~!}|U~=wAXxv|0$>HmOIH zRSHImb1I2R5x*V+Y2+&7u|fTP{D3ING;Ej}D~aYfL_75ara}t!I>JcR&iRI~T+3s= zxSyomJ=VS~E}ab^HrL_G-gnEqimTu>-(bg#Oh~lv?5uxl`b$9(N2pWOItCwVeWVH@ z(ba?3`(b_-cY9JJ;zazF0za2}d9P+oVx zaG5z1^>QevUP|sIvFm6&Drdtork}}BsX58631F;@#Bsj$SRn6<6La;5sVAwiqeO0 z7wIZ#qw%0`k*$>~4#vr^?|$)f4##d{yk_{nvT&#`%L%S*nUPR{VB`;-U`FmJ<(q6Ib-4rh6HK#>*OH%|WL zp!b86HhDUu@puHF+@*CfQ~PixM1SzS7evaMpXZ6yq|WsA`OnsIYZ&MHtDjZDxr7Ff zFh-ZNA=xjEZ41`7Nw>&l_{@I& zKm2To%)KXD5-3_Er~epQotmMRGEq>5b)8;|Mk3F%8~R$Mov@OCf9&4bnuWyI4ZNM7 zlO^vxpjS+|GueJtmTha(YI)>#vKYwj+&P?gI21Ct{}ddlI-W1bd4JbTI(U_3O{MR@ z#%qO4CLMb3WoP%7hH{I0B}MZHxBpGZ5Ij`>k`&K&rI&L6vE@W7@IlMmJUfX?ptCW2 zqQBt1hdN7S24d+tS*qLC+Z?r*iD&z}UCVa8n8-Hkq>L*Ro^=$x)Hd*e)Pg48E&sf# zmNaQiVx_WAvv#uh|8?kW1r#F3NkBQCKb8n+uwRd7Lg-e4@Qtsb_5UdE=wc820X6oJ z7V8aKkvejkpP46@VDTGy3Dh5GuuGN^x!+TI$Oagc@E5W~4jRtw4Zj`k7K%<`U_5=e ztsnYCP)v!|$m1TfzhEnK4gic}rLt#z5~s%JeszKcxMT5*e};t$8_cACiuF-S?JR_g z%Z&-#*%v4o1rimB7~|glaR(h`R^1&{pu}@JV-VQ}qXW*f_&>@E+C%;YUKV|wI-KnYKhwx?O?DuZ} zeG>~T(88lP%OaVy8lo){$(g9B%@ zYziGv^0BFRebnx5>EM(FFNDWOkqv0v{^cLE|Bgw80W(yGKIo9#Fe{yMLv~#FfZ(zf-*BPhW;w5@{w@b07 zPdAk8T8@qr7sD6aZ0qmfDRWbjVl2U%_t{N@?R3&5hvm&P5A|o4*s{O3u7_(c{#ZPm zsa{KoSoN({7p&V{RsOp*3ag7aT@6FGAk6LQuF#SK;CG?5%!F_39(6`ih!+8;L+Mqeb;!q z4a1YSzo?1%apR$z>@-jk`*w#mD!y?*0_AqI#hPGxIFdI|CQmi>ccaB*XDZ#A7z}%{ z*CRgW{ZWJ1K+VXOScJiP6*VZJrnDZH=y(ruy*^(W^c(W3pE`_2fh_D|aRye^te*V& zJ#sJ4Y+~1#WYWeZHp2-78XB6XQTij00m7q4NAgV^ui4k)_{k0j=qOa0ldo$QNB3hT zwm7^N=BsRxo$BN>Gg zyBuX1%w*vAIGhf2M%ZVcV=IYeuCQ;#aM1b;oPV(Dw)GHIIc}R&b^FL~agS zy`m0iO5i?x`N74foOUB^#|gN1hVpXrx;h~p+!0Z-{ew~wL8~~Q_#29JTNz^g`!KfQ z{!{QD0fTf_+YY{6iAe<}+-(sF-|P&@edV+KUo{^(Hw!g(N7wu-Nm~;qbJf+e8n$S^ z;l^PI9pr9xpF-ggLWwO$-bWjB_^H_T;?0TIWJ-7eR3Zigy83^pI_tM6yshoeFr>gx zQqnD5N|$t_Fmwt7qf*ihgLHSNN_TfDLpKQ0AdSS(9WUp7p6_$cx%LnHAK2I0dwtCtS|B>E`r{S51;e{&MI0#i5)5cjm=BpBeKBOpG~=oKbMmh%!B#-On=}? 
zp8mw%5%@dS9$p0BDYe>EbsnP+1ROwv6G&Mx6jKd4KDxXdYGxSH23TQW!u1@1g#Kxr znb|JP@#+k)jL+BXQKm}r`Z)YzThtFG9Y<)G(ZWel; zD~KS!oUC@}x0UeKDaVqskx#WXTfl)s|aTSFH2^wPk6vKH?pqYk%>QF%054$D|+E| z&H7`kE{NUZ+`$TGrf&W3q|6)g+QawOLTa+(vUYwg;^((HU3G;zEK$sBe_~yW{qdgo zC}&jMdJOomJK;GuwPDG?AsjRrJEfi72`l0ru8`+P3dBRCr~ zpEK}?+#a9*tTiQ#Ip@%q>=v~1)AM#muHPO(P!d%YBq}TS}TTaA-Y#fni%?`h*8hd{B46O}fkA}uE z2t*0`ts*UQsF<17uuZi^rH=6)RnGNL>AStF}1CT zC%y_IG`p(uHXL9um-jVSa{pr({`)FZQF>#IVhV&3C7h79l1<1L3_Zc>)Diu$w`G+x z?X|?NFDFHa_(uy6gcr#nPinp#UnVEHwo0eACyvOXcPbhEwa`BSStMjABx^WE61eUp zh?z_KijWU5x$?=E441`gnntq{5tKfuttAar<+<4?47VXZ9v_%5Q$h8Hh7#>$Q;R-_ zD_a{94V&abKM?Qd4iQ>gDf1%WenwW$hG?5!&(IWYu>w+uA|Yu)1^WGSE3ev7o8)5pJj@jqJYFB26@8^f|e3v9&Q2q`83Qo+!y*`Q|@Gx>-UC~bz` zh*x`pZE8m6E@?5Ae>62w$0TChCW$kwcs4cFw8;OJlBu~)b;avKh07n(2rEqge&>x# zminJI^NKZYc_no@8H1+f4R>PZ)Jt&zb}H6`#tPf@Od1BFnVOtXC#T zmww96QcZppZoSLIv@^bjIS60Rr8SXXFm?oZgfjG`^(szpwA83p94J^xGkUB7FTUp} zG;)S5Anz>}Jua83KWz@C3ZDM?kRiz#Eq1fSYnK8_@cAiV2Aqj1i$5xBzAk1o!K@}+ zL^Sa*>>aS_r6ZqRPUhbaM!Qi?lfX=GKkrMte_LjBqsKws;S#PpRIBmmmv969kILMY zV+hiCGFQz+sE004w|lj7D1XRyU0r!-wk%!O{a<+hKL{U!D+YRTH5s)MX}{uocq8am zj~BYP?0L*jJMegzw6wJ3uo%YDvn_dvxBDh{{kN~LQ`wF_DZ>qQ#JY*!2b5W0#8W^H z0}sw0jQ41v82c#SK(|H!2sA>o{Ark$s>;D;F!V^}Ry;5J*QaWwNQFS)JVVL{2lsx2 z_c#Dw!}cD+25JE=Y0n!Z0`?*qlv8CZ)+%xym4eAZ&<|Xm z$XQ?;Ds4u+!r&LN{)?9~ns0f2=^kJ;g)!x}$rk;{9OndEH1rj#WH=9wBg z@;zu*+?wwiD;4KevTZh-Z2jgZt(iq2V5v~qHo#H7+z;dyg+Ymt4$@ZXbueWQm^g!a zr#1P?Co9_$XMosOh2hI>{kh-LpP9h7MliNr&ba^$0jx-K8L$}s$V-hR5vk5=QN5S8 zMCF|`1flZ#kzDN|hXZG=3MVgWxErLnZbP!;$|zOcNqwr>;f(>{5CWV2%pFpPaUo}$ zoXO5}DQTxr%L6L>DA!u9R`V)0P1bTjfFErNnlN$|Qkm_;pX|2@q9WsNdpo zf7G`sW#B`0M*{p}f0$0+%S*!MGr$ZCuG`7-4o%p96V!SR9g5~)ZzeeC|48X+5Jc8! zF4wjfd+^O#3TEUx1XA)n(U%P5PXQ5OIN5Ba$}~mb9$(oeV@Vky|k89 z=BV>F+;?Wt=SVoD@0+&TKv$@Hw`*l|%)ZmlTKNaIf78bi=N(t)e!Bc;8QB-tGqCw2}>`BKN z)%l>*ww|cb_YguwE+a^ucIV(*p+|SU8?z}CT-l!@*E?RrTmKq4{7^L;u(FkP=j6+F z60>re)8j-aayKlw(R!N}GeEOgqGX1z>y9^x{}{XSa(R^|oCQI$WrZ_#>Ze5yGVdD{ zljLO4Mk8d8qrmEid22bqXK@M8cZbA7)r()8{#6y?#Y6t#rtce1d8PuDtzfkUsDudf zGE1iBjBIbUzA)XVm7up!K77}1Xt`W~BII*)s)ELTnTQ%5e-u{&xO`3AfrvEk8|4V_ zaCX_Muc0>g;iQcVinJVQkCEhf7c^Ri_vp8oj+<1T5NSKtS!j(^88A|N&2dcgGtNyp zkT4)=&~R%PX1y-XhB5VhOy?D&fMYerUh`#>5wXEOxB43r!dg9hUi*^B=@g`(#(HcF zHGV68dYPnGyffpBKu7g}t$0s~`44HaSIJlDQ8f(fp-d{~xS9-YUpT`Z=IWb^9&`u| z5$fhVj|CXVXol|%p)&D=Q`b-NGYHSm=q&`dwV(~j>IAYz2vwW+jBCqf}`dQ6J<)D z*yo6NpG8Bd=X(UgWb!wmbg|deM^sSzh~ge=U-hKP7nymp43qV zIv4wE`Rr?rFdu=Ai?PBUW(I|x!&^#9``g)+r<~al^UG>mR$u3^F{EB<&ZSL>V&f}a zteiF_L_o}D`|#*0ZE^nAd}iIo?QnKIcOQ;jgyCTZwTc&p{N;QPuj}Lt7DbWne-`rI zO8-JxoZFibSkG>wxBb11gx7e;`lcaO&GA@;vfk^r|Fw=fXW%-te4Rtn)62wN_fhxK zIHDH_JcOa-X9Iem&qZ0pdO@i#6$_1zyR*ieAke<+Z7!cVm>!)LM+q$!8;bT5CHROe84gWi2ZX?+{op|KnGCIX2SwjU?Gm^00aQv63<$$4?NlO%{c3qb zK(?T63kwT5VC~F?YWJ7dU1T6A7fq)$9d)PORb9fn`l1UO!mTL2nQMMBuA1w#W05IK z_!Q$~4ByS4s$ItKgLlJqp`2Z>vtnBz^ryaKw5A@7&pt>Y9i>ns(QBsa(RAa6GV$GF zJ>eq&C++1({S8#dT2_RJ8JD5`=O>6D$}|2Yz)pKwn!`c{jAmyX>!LN^lPL4dQ793> zHN`EJPBpI)q}byN{KJ>N+r3QdB?A304-=7^x=38AhMlrPd-kkgqHbg|CONhKLj~KH zAM^e3x23SFC*}(SN-I?FWRAWOG0NQz{Q1&dJZTaVO%>dvoYV?pv8lql&D&?$3s#kP zZO}IOGG2UszZ>AOSlmsYTs}nWNf$%!UL4tb=yU0w(jDg0xAod8;sR^I;qAIt9}t(X z__iKRI-ULLj|v;C@;0-eA^Uyr9Jp9ANEQDTSw6{yJ`o`XHZ)=@Y|c59@EE)`k2r9P z>GVOqtKQ9xhU7=9)plzZ<9XP|WHrWcy3O-HY@R)arV4Vp&RaP;yRsFJ`&E+Oui{pl zZGJl^67ycr?diJ^uYOc-`VWo3s>XO%0#P>?ctCD!V(61wrEe{K*G8|(;UlY z=}G%_cE2=N2bW`b6BZDALT)lL5Br!jM_+T`Ll^~>Fr7>r!{JM9kAW|!KkbQR=<3K4 zXhxCec38}q)XV9i@@m>$!Q(xK?mj)_f7|b-@^knp}U2ggJJwy8J~Wh#l~B`IH8m#i+>lk zr2SR1ntV{8XR*^lGqCB`Ak~^^^q`*) z?TJK^-@GS1`}v!jxCb9`wY#<}jVdjY0N;#%&Tsv(;!rhZO=vwX=MlM0P-6Oey7o@Z 
z4^!QoQ{dysz`HdlZveZtDRtFXO<^h(h_tFXo;mH97G`|9N2{r3bl~&#q?w$V@yUv-pMvklj_2Q179Zdb~GjXB!@%KU`ExXs_@Z~H(~|`-r?Od zwB!O_3Cqg+Ock}HWl?U)c|13TeX?vu8J98bAF0>OZ&DkC7NR`DwaR0fVn>p6lGZ9U zgK)o&$W32_kHtOjsVI&&(qR7Xk$mTDY1+*`>m5$V#!POLV|%55pEz8Wt%H>n`iF`(Ue~O8>v<;di!`IeBBx# zUTblgh9^y97$EbMI@5E0DM9FC3;Na$Q;MARE)ArZF8x1pp8Lsc{dd<>hjbvheRVlv z{YeSalfCX5)!pbLCq!HJpN0Paw@M$Ux=8J=t7r#`~Y;_Z(WlwCQ==uyaibSqcM59 z1V~Yc%%0C-d=72{qAJJ(l?T6}vN4E~gH|!SRBge@TbDpnZ3@lum-Qk0{irm71X8Xj z;i7y%?5Q6K8f-YR)V6RQ*JaY6`iAoneDx_sZ43Adc-0RvFebql94Q{)kWV!z zex{#bc?j?NBUjS!D+7xKptLF?9oLOoQ;d#gbS5@F_Z0}B;@Rs$`JjZ9(&D3&N-BYZ z`FBowo5_E@A`M}3e!30BYBUGJT`DgVaF3Q}@Y~_P&T4*Jkq{ zajo!jMX8x6E~>Uvytkx^xPdN0zV%j;Dna}c$ca@CY<>&_kK#6m(*k%=iAAa?fiU7kd`^~Jhj1pdQMl24FhW6o%rJ1;Pzcu)4nIunE zCRmVie^#UGv#gXgjA|!M^&-|s_X0N!Ldfse(qV4ZF=oQt2?AS8VdpM;whRLC7wSzm z)Mcg09cG}kOCk%6;Obb`-3)aq$~41|RMum2A)8*Cq*JH#S9!WEjHKPf}C5Z8TezON;eZc(~8*t@jKg4{#J_T{m zxRTw=WxW>LWzzU`kF{FZG0{3Cd-eKx-|1cS!kZ9q& z8-vS{(^b&j>m&z8Ia)~1_sbqq$?G5P?#pXJJ5k6e0v{ENm6SFS&yYy92jh?VJ08%y z=iRTggRk3}$b)GbbBhBb0D+M|OwYW=PA0B@usI*+^w?lt`>GER^c#q$1=4kFC`2q> zeKqIUpyrKrxNi2=TE8L3QXOVlmg6BJZPK88^8kq}=IpbP<9a9_Bm9BgtZF!rpT;O- znoV38jy2rZijZH+OpNunnXKzvA1e@uHcmUsTdQh{Mk+}hH(n+^v+U)STtBspGaEKF zTJ=D@KGCCgQtr4-r`#qF8AWuhp*WyCPKyr6a6{=#KfNhJdHw~Wk!`4SCjIl+3qgx4 zEaX*N40F!6{qEj#Hi<^X8GJj81QhhJp zW4#qsXF-MdVeY#ghKG>rg&1d!c;-KDMhZB)Auj8PCfEc~>wSi5J3Z3;m{2s}E@ z*ae>3(=pIdrO}8omfkZ~R13r}%*ubfRBB}VI90BvDAyp{^4A*k!`fpsZc?&}3lf9G zQZ;Cayt~%M+}jO7lVVvz)Vw38t!>R#7lEs7`(2lhnJ~Q}NiB%?qPJ@&;2?CDIktTEhgI}eMrkj8avgter)EB;ySXMX`uNL!iT!I4@;paeTvG zP9I@1I?$Z*xxi}Wu&*p@S4lS^EnBXMC^(N|olfvt+&sk=v*A3*KMgCoKr%LL~?%C-Zn_S>JSyKR~I?7pi1 zMOpPm9jTDbUvOEJy+v)->4IT0Rr>4`z{#ucF0M4T06!0s!#`$josWj$r4u&jKGO(k z(Ig)7u3N6^+exHkI}zsirq!a1TK4rC-t`q2&mcaL6M$MXzrM8(AN*s0@>{f4Ew7!Q ztx`OmoZKkXy@aERU>bWEZGTf+C2Z)cADONpOXNB{-~ip`W;1d`=K?;?XFj_q{raV9L= zgoo?flk#||C2mDR?r&|!)@U@gU}Y@2hrghwV!=ME2620O9`Ce5d8#vPD%tbf?)2oU^rsepZGWo!hAvYiki)(JOan_$<>wDiWtVHL7 zlXYjkPuXVmk>2Vkt*3sOj*H!P;rd2V@JZcL@N9;V z0Ku}x?JGv36PbVsQ6u~@LzqHh^#oEBKGKZ6#JB3-SlY@o7~<@|443m!m+U=@h7{Y_ zW^0vfsjfuja2yg$h3Z~4mWbVEjQthvXuUkP$IHe)&QRb>Qz>Jtz(*$i?LRzsJnovz z4mDxSDln&*jC8mjZky4xBmwfYa*6Z2#zVRr1dATL7a109KBzkFC?AQwV|qo@LdmqRE~ za?gBiH{knWh{=?N*YxMK4uDI16+*yLwm)KTYeSf>r@ssB6$xR2SL|4<809NKC@^g~ z>IgK9>Pf!=AoGuta^-t;wVJ`6Q?Zs!JBJVP2Aqo5g!yN|KLa?;=C*Z*x{z~k-(anV zOQMLmeFm(e_fF=6A`*9(vJCzE<(V};*-z97&N@q>3j-4Yx}JQdEgc{2m@PaiJ>o*R zh*&E=(qJ*>^CQv|e~4Luax1bh=Agn}$=SgA8CLCQ*8Q%E8l5>wKu1TQzyHR(&^Tvq z8$PC`Z0R!ExCRDUcMT_P>jb}#C*ILA5ZtPqv<(#y)NaR*8TL1U;5XUXH7JG9F(nw0 zG&UQuc=B>PPGODfBsU8P@%0kaFK#cP&ARY>7a?t4iZScsf&iarOO1@H49RJot2165 z3!WFszNqS(8x%Isc6qHUd$wCr30AKH%6Gg##q}3>#bAA8iT<2=?AFdohoj=R8tu5T z7dKfl-<_`lq<%xpjwD=-6tltXJ4+Imm=PbAv#gKk2v=ITUd$PW#QbiUr zU0xQoPAyQ;ixQU$QO~@PA=t+CM1;?K@I{MHmlX>*y6LjoX8mlN|91ILPykv8DO>ym zuegwN+wO!Ij8RTjIE1ZuK5H;2pyrU0Y&>ghF8RL86;i9R(bC{u^vB5+b=Yku0ICqu z*?j-!su!jn{yo}PW8bk$Wh(W|Y9+02pmI^zb>5-x>Ia@LzTk>5r|ZId7W+(NEh>Be_PFut-8zQlV>V5{;D*)-Ki;xfpP{$> z`i)VfRte--n@B&8^LCan4#BY;(&SGY_$!!<;gAP!fb>p@FE9DegS!NugO!5}{#xbf z`L&z(TP13!G=1C#t;y=tNtCvg@&>h%qOtG;zu=NdU;8!nK@8}qt(S@o-_Nx+-8dFRMqkt3Op&YX4yB+z?V}2+gjgD3>z#rxV)zI z`hzY?Z+8^x9ar$yEH7hu30_e+=K5GmMm+Yo1vz8Daz#_YSwG*El`C7G)(0D>x#Wwq zNBA(+7`T~4KsY~N2fKb-dw*fa)zOH*#+XsfnlqC5(C2;?gVzJPT%s}tVe~AItuQP* zA8~y&Hj|kFGH+y=L)}5*?uAWpV5Sth6>)o2c3+3>#aSzN#L4E6#ow8PsheN_2~Jki zCg7tnooT)C+eY*C{$lQ6GB zrM}pJx)QFjA*g{-zh=HC`FBj}0IRlFmeDZLUks%$H4LPqiwwSXsRGoYqBGC}SKt*P z>chwj$}TYA@G~F|jRzYa6@ijU@HX)rW0brb3H&zr2(r=cVuP}*+X#y_Vn{vASvsRj 
[base85-encoded GIT binary patch payload omitted; not human-readable]
zpE3N#LHM6B{LdKvXAFNpcKg)F|AmSF|6zi|;m|QQ@r!XMg%nus(~Fs!aQtc0zyJ7Q z`y>amYwe_m!Ebi?of!8#?!z8LIfkwHuwwMCR*Rxbl|tXTj!B;w>wm&eOsPM8ETCS= zy=L*3iyfg}5U+%S3OFRj0m^NSPJ6{xdh=d!1nu$P#ChLw+OMBa?mKX(p4^v_;`e8o z_GlfGcByTi#7*4k0T=ghH-S8!vX>BsrK)AR&u7T$!?<3&b^P0r~$#IZeC^a+H}9DY1W4&V5_BZx6+bBniI2aQ2v7 zTrqPHXu9eRD&zUMRFfhUizg$+99hn9N3JpxOb`BLlm4~hQ4>aRORl9Dy!f+*!BXd_eUDJPu^P2rL{44KTn+WGU_)Egt87e4`-<)gYFHaZwZ=DSI zPrTLZ)W>O@E}2sPWf|9F4?xMHnuc2rPYEaEP6{^X&>BW5Y&UP$PR;_F$Cs2wPidd* zRoU*M-Qk%h?(2UZ{mWr`{n}4Ioprc>M@9pX2)@yq^^b$Q$@d)niQ~^edTZy1k{%(+ zc?)5=_0+G8Jecx>1xfn3B6<2xY zAN9w;Dt%0SqZDPY$Zbn#eR+NuRugQ*Jp@`7_d)BzWAE!$(aqIy+Kv-eMepIFOW5;J zlBnh-BuO5QUCT(6a1pH58WgskTv4EFuAjn}%?2s|>FoXbBZncyk${HzIOBgkz3{uw z0GqK(OHs!Dm7Dic$t&6&(ZFyiA2madDx5%5!VSXd^Bo-vToYP8izF|C?&4IRS{R?r zx6<<`5%d%(J*6dOM>O~_?ulhHr%Q0(%a|#=O7G=EPbNQ|hyx{ue3qdiR-UQJEcIGQ zDV1jQN0%x^qH~aZhx!u*^4IeFG4@c-?c&L3o+r@}pGuB?CXwf8dzK;@=qHOc>1zZM zzi@Xfy%1EMBJ(6De*T3j*gSyU+x9CVRVFaG zQ_Q=6}ex%dtDeFb!I`b5-Pfg9rrI^ zPvlDPxOzV*V-IgPv_K0+Shtdk)|Qtvsye3>c7~U$Cz}#mwoY~>Qk%}${ab$qo}vqG z^YKXJUpL{VuVHauGZ)WiuK7C$WiFgqk6!g?xwJ!omhr~+so!iOzoR@5n?SQz0>{eZ zh{LyZN?fdq(&=TT`C~Ie0U3q2U@m5{%X-TWw;64?eAPU?*pw^01rRuz(n^IUYk3@F z8MTH_Xd5fWdh}d@=JR@r6*R}bP>vlsYlsTh_~Ml=ra^nH=K75Jw)?i0C5^#BeGzI% z^d(M*sGL=OzBqazy}*eQtQ(wkCtz=#9a|80xOI-HO2#m=OSISgmijTPmA>?D`#z3s zSO-z)h5AmmdKzNAdg*k=)7LN5d77=?w4Vw_KkDy1jwm6Yz7RL*i&nV+~>_n>2%@ycLR9h9YRPMpBN@9ld3^I5@cV;^oWY8l{pVNFcO!7|bA>};WHF(*R zRGQafw~w()kBhU>7M137H;ZFL6*X;8EP_4o3yoRu8-ix^2KSsWW5Z)9xQbb5RY(uu z1mUk==t!EcxXxqc`fnpS2NiH(4@V2XJq7v)H~LA1-sD&ekDm-DUEX~KQkXepKTB@H~=*-jo~wf>N4_QFK%99Hv*98xXfoPnn! zu35^-ka4lxs_eF4DS>bL{29>7r!cxFq;#tk( zo1;pFrZRX#>Z4J+*^jN=mp0_^Et&rKU;OM@zE%plS#!r`iQ%eFj9=%apP&PSLCVsgg{5pY48> z*MAR#S3!7excy4%uZP>l_lx;OkvGK~qnpb`5iGy33>Wyg$DvZncE>5|kFiQ$jkRdv zavTx9BP<+UluonOb5&cevd3y-k$wP?x1(~?8!fb}D)aN(PWiXN<}4;>3{mA86(!HK zwzDw{=bB<}(`=Ml&7ZUel&A%%BC!G6yG!ibeqlT1_j2yD%Ef-@fk@mv;gN3W$Q`xO zZMHMt9c6$zGwR=7l1>wEt*zXcrg%kOlvZI_H~Upd&T3|_TKHSog@fL&NOA!vXQc~p zPsPt<;=}&iv8{G`Qzt+itsYj(Np^K=3g~`2XKESBRdh2Ea(f;IyKAjc{Q@sizO&3` zEje{Vr_40EJS>2cJ!ko;gxc81@_e8xTiK|K$6731&EC2=zVIf~!@Y>ke=j~d1_H*} zg8oxK#OZzi&<>_k3P#)zvWUg7&_?l6mfau9ukz5$FG^F50L;_-pkZIV2X~cc-zF09 zH6M51Ih%UCokm+8g2&iF$NU>_%jE9+H6|MgRwN^X$Hm|9h3a<1>X-R2!wA6r-C}!W zrhPxQvga0Lo;%5mG=i2vK0p_|s&%!IrnW~H4o*B#9T=+Hv9g+LxWOO7r+C_iVbu_) zNoA{BRC}ro@<5QwB4SD$9_hO#AAES~Hm?X>UaN>hgZpU-pY_u%A~qYjLkc3B+3{0f zUJtuJ!)ovPRQcXcD3fxE((XQRK&nBpHD_`L8w0<<-m4jc4S?T$p zYo;FZ0=v#QEY+iSm_q%SZaK_V9{=cao=y|7yA*VlV&CW&@3`Sshkt5{dijv3zqfEY zdfWuY7y98pgr?Aa>pc?T@E7YHU7!FYA+14G{Fr|&lJ|j5K=1syofl_c#?TM7;Jx)VAYWAimvi%xn{GGDdSSrMC+fgygANYZwekoHKzhsynx7k6umlo-p_a= z#DHI%4j;iTwzu~bVzuLA_&6kYu=LWd5UmMd3PKp|bT6jl$4}bFJjrT}7RP)sjZ9&n zZLJVCkb0s~PgWyF^u#{zQP9obT}J@IX_Q%u&YZ0kM~?BznaP#lJt~?{)|VYu2TwNz zxR`e7cB3}eMz){ZamU&$80OSyrC&|=c<@Nk&y`MfZj^#TR@u*tWwk2Lc|ybEFiAjn zL`SfFC+=|fXVKF(`Yb&i+xkh=v1`p`e*k5_fzu<2VIUDJerU@6mq_+J?kU7|T z!PRi$nnava(`!I})ZVPiVzV_9xcbCz9e0uwv`vflW_^V=(GOk+ymLN%ht}T(BqZg^ zfca8mq?swl=a{M45+9R^r$|zp7b*#dEY9i7pU%=NL9BeWx@GsZ`RM9)bI|Hct#VTt zrc-m{0Onp*cDO>&*@g(x!iPdXLx!@KKC^*vMFB+arVEIvlm0CB=46Eb>SPb!K7Qo? 
z1<^aI#`h`HtZ| z7+j}#hAysXbI1&4W0ZS0&oXVr+@;zF(`@-RU9faK4kCrd1g&?zgzRMJO^pw>_@i9kEFV?f=A>UA;VD);cLq5qy4(Y1N zMjMzdY=tqoogh@b8?)8}KQF8M_NT9DrPzBJr};hWyQeO&=l7rQJqK{;7a;}S;~Api zTAmNs_mdOYlJx(KZbj9f} z26;W>0FNs&1`>I*dFg@|r@0Fk?RhyquG2S;(-N!utlmU5>+FWEv~7Xu&R-{+)HA$u zg7XGTA%pN?k+Wd5w@D3h{<0&9XzV`F7-fw7!lb~<%&1#bL|8Dkyjt#lHxAKVK}6O6 zECM6l<&wNmbqxP%e1N*;j2JDW0hQn?*2%K=4jJuU0F(m)yWP6xw!_KN=50&Znqb2Q zr&F&!ol?ABnJhs2+*&}jF-eevv4Uo%&u?hTH-GTU^g+GInqN}Xo2g9S8ugzEk$$NK z-wnqEVg;jE66XbrQEmoS=w=gx?DAJ5P60uD{8?@JEMIpNr6Ye{lmDerd$BusRc#m< zAp&FMieG!omw5Qg=YM;e4}lic#Ok+g{Nq2qYgYv9huylz5|T%l)%db9_cr&L$(?i; zA~lP4B5v90yg1%T;rMtAc5#bT}1v!fCYp3ohh5G*?WKlNVv(Fn5ZM0Pvq+2+c7| zXQUBNh)n;Ozt8E|FL_MIiZK|t3YIcbJqJZCfi>eM->>IQHwL$W26@#LF*Rx~Ra zt0|d{or0eNyT`aG4sqy0qRlzg?SVG?$`12Nyd{0Ei?ESmb}Zsyf^J=VZzZXWp!Tei zU}@6i_pCqWb^qj>rU&O+VtzZ)IMe?bCCfS``YtDbquHXb>t#iuyGiLMKLWzTUE9=D zm7hKuaR>5mzE}D{unzXDWIitQv1%wB3)*#SS5*FjjsFr~?fp|hqk)y?ftO=f`^Nf4 zLD-C%m_f9pXZlnTdhwevdL0k%H!R}lkVfo%8@+XJi3@|)J6Ad8FWu~`{0%qTv+o>b zI1K*AB20i;R`5UN+(-Y^Rf817@(TzCUb3FsjA6HN+FIY3$$jlZxDd`IiTwPVvfC!fkRrb?LP z7ai>0Zk(!nUF&gN>~FBlOGrl_aTh=jDkkn)&rc)Ye*N(08>+ubfr2B^kn^oV@1IW( ze0~W=<$qexi9}Bd{!?N;n_V-kZ|kglB~n4`&z7jO``kUfv8J{`aQVhq^7ckcx`p{j z!w?5K-7b4`MyTan?{U6cIb+EaL>-nWj%^X(VMA@}*Rs*v2^7I z>&6D?&vTYi+DWXp&1pj6obc7B7wEGY=Ot9oI67tP{igUT`uuW(j7uF^P+#>sgZV(5 zqVxC2E!w&d{e`xD;3Z2B4gFbRF)w>J@#?L2+yJrJ;cd#6s>?i>>xx#-?=l{gskQ!- zZWQ`HOSSLY3aM`_y#D(3)2|nE4KSoHp6x^~lz;*{jw`PFhpdfVhr{hsjV+PO~#n^Ho8+1R78lY|t-xRrxH7n-+=` zcVv9gWNN$i*ws_SH81kViLYbmGUPkA%`3H2bAOkP9&l!$5m3jZcD-tQGrc=8_F{WW zjL3DZg8d;Ay4_d$H`S#bN^Uu=2XVK^Q&){L1H#51qVf2faU|lXMj*w5-KjX=6Yv+q zG10;K#;xJQfBwpFPhF~?0UDt~+5AVp818A!_qnh5^}VGq*^TYhiCgMlhTHX&;mtM7Sw?_O&!Er|MCmX_O*^_mcR6g#kpo+HbTn@VcA{Bu~KQ*CfpSp7XSa=s{| z)RTeQaQd!ucUy+x2B)_8ixhmv&cKH!8?VOijYr+*`>@OIjYR~x2Zd$coU7vohYjDg zi+u4z=oi)aKMZ9k8ynWnlR^Pd%J6#ZXL{#IHqzQhfbu!v)tpk-_4f(H2i+Vt1Im79QR!o z-L7fVfx>9@IOSX7HsPVRALZQK_qKz{zBdet{g;0JpzzP*g&u>fsm1Dwrp(`6(+Rug z>=wC%{#Q!I!u{SX^=R}MvBgXO_!7Q4T{G4pvCcA8!vJT(N{LoKUyGgbniyQZt%bgO zpwpc9t%UK{cDn<;H)6yi-=c@ikN@7jRp;}z^aAv)%b6bQ%VqZtO5KLWJGPzZJ*9Nh zKWOBz-$x~*4?X_jr>=_g-+UAG4ee8h#pP@0X2Qg6KI~uL#QWtqqJBb2{*w9+nY+A!aJ5=UM& z?R*qPxMxET(eB~`_M|`Zh&&EI{;v4(@Fb^z`tC~P2M@6DYc|!CyV1W zAt>#67)&}uPbRspFT=ynx*B>&{=K?Jf`isq%MmsIm`x2?Ha}=c(mAur-oP^CO=qVpXU9p{cHuljD({^A^I4W`(zH0>pRY+%>NO6;@nKJiZp6Se&=UfEI2Y5jrD37_CMLnGOUdG@S!+b!9k z7uKIV-Szd2_^bgE|MMp_|6Ih6PbeJCBF4;Dntu{FW9$DV_x)C3-2p2z;wKNXKJ%+X zP5+JAVrR}L?tgt#T#vn$*6jYu;e_3pfu3W1v$YPneX8&O5GSLa`yr+j%+-T^xOW|V!oNN9vE}}S^qZ{N=&g8ylB|hz z&p)oC{%n@*;l=Mn%`E?p@3)`KwLbg&zt9Kze)o%I{8H0Gzu7XvpO?joLT@9WGaIkU z&EN1*%}>52espUMxNSct5qW57biY^TW91yK{n6%Pxyj(_82{flO}KX%YtBYQ-u~ot zPAYQmvYkn}*!*$${6)Y2T}$Vchr-%_^)d2~XYg0yrM!jfbj*dKoBofn`FO@qH;dlU z`tv82)c;oJXOeH?(CjO8DS#>Uf0BFx(E#QDO4oniU3>yD$@@2S$Rd-je)a$Hf8bCD z(M$x{~xfI4Ipj`}Mu1RanC}M5Qw&r$B z*VWM_xoC*G{K}(zt-C{U?|!#lTOeb_d&>1>-{g%AI}NZ?02MuCZ=T$5`ECYRFtY5i znBsGIt&?Ei`Kw5*j17-)cjz{%dfQUL(wWaE?hld(>{CZB;ctJE=s zQ3>EALxcfnsdFy4TEK)`##$B6grlyk$cPYwv|d!u*Uk2B4es zM7xj?mY^!e^_KLUo$a6H7k6~~rP)}8$=pVnfJPyCA^E{|z(6n-b?L4+;kY|Cfi}*r z*d-uNWe#xTFQza-#XLrR$o8b#V3PlVKh6$e#c18fkXM-CUF9JhL(hz>G~URW9Nr>!2eZ^a4$00u-Zs@QH3zSv7gjz5N!z&=G_K zDmZI|rQ{Oyp&zuU6+%l6d|?zaV8h}1YkJts>%R zkTa-J8rgg_KL;Y**looQtr?u2D|49l2wet9I!3&)M7GufRY;s$KsdWVT%vVUSosHq z@~h(an*ol3DNXAeG>p8v=4pU_@kT8GU~qtm(1?*0-36tTw7?cmeFnAyU|cW4YRa3@ zz(~d-99D7%_jr`dKpUD48A-M-jcqSctRR2zQ8y4t2XNYxS)*lkwvkvFe6J=OL9BOE z0342}>bFte9irGxUZTzR)12iHWL0eCSfcNx@nO^E$;sxy>*s)yWcY~xVXqW39$`hP z@lQyw5k?{x0TITgImQjqu4+l z(2iKg2@i_ziip%Ayj7ZW^$2FDxF>)`D(E7vNJ|4@tS@ck<&)ADXVR7jyM*c5=ijPy 
zBCHpeZpma&6 z;Df;n_mka%9pEAItSP$20=t56!hB1ATuWtrP&3qMLx zIHlxvMgw&`_ZNbF7XA$*c_0=dUmvP!;Q)hqgb8PEEEHW+Y?T2-P>gH&-3HJpME5v< z8A!mBKtW<}g9?o6z*aRbov*=Xn>L9s+4lNTyU+cA2m&VPomRCx)&^?8bf(pNmG|!y zEc=qo&xsbyKi>psh?t(Mtq~b%yKN$XK%70Re6(DLyhNK;AbrVwCW}vA@)!n@=R%u4@Rh z9l#1x83otI^YX?q<06z`vq-h1Phv7CcA{}vp-(iga+?sO>uZ5Gpo{xPUr^F2t^vO9SJ9to(gU!bkE(O^jwX~tV=C6 zN%JQm?vsa0*-ir-@3@+=;ju~_``*j3&>VMg6%gEh*7vmMv?+wQc)1%vL~ioTISmy$ zCUjVMm4<;12$Qt*!zj}giAz8y9IjKpq$y|z0!TW~;=Y;JJAi>RrLai>Qv{P904Qnm zKs48d{NdG7;r>jFnK@u%#1uT|ymx;jCxUP4Cq!#Mx(C3x7nJSBH{*08OVEVOKp{Qq z>PLf$w^*PBh5%w5rSvFWeNB;yMyM)WI7D*W!iE^uGVU7yq_l2Pc?9D!-?%=3@KPzm zhPYD?0(Fe0XKZa4`zuuK9svmiL6b16<))2@@=WdB>dNGt7?FsJ-MBl21sFP;JpzUc zCL}(%ZEr2Ug>_=~G+Mm`RHaNFx^D0kj)~7ZvF8B{*z7?%g(g!+92<@33SS3$ z=Ba3*>>>pbO)eRPH>k_)=OGJr&8@NLT#I!gUo2|mu?V^PkFUr*>!`B^vKWlK83r7Q zjXC9Fua%Q2fYT8MVdRCw5d(w`h|Kk_YXd~YwD!=nqww5(P5aWL zK;`aLv00;`DS)ENfW!(SR+TiqI#n1T2wu;Kr}Ij$e~;B5s$Nquz*3%xiry9HkMD8_ zv6)Pq#?mhsTglYdXBx1KnHn4^|7Mkk6H@WwxqB(*nQ^h%nW9`75TT|e+@~QM)T#XI zn`}8bvc8bu&U0_MV;&63Ia=1DY_1(N^vtM5PmK4y8etz{lTKZ>zns6q_p~0NDi+SJ z0i{UhV*+6hB{x9B`~))qqMi{UJ*t+)(paaGP-}^p603?84B1s}1VLls| z9=!tyh*48Vve&m8RIk+|3=1uU+R?JFqKPn1ZB#IWt@tL?LE8#pF3Uh!Sg@cA?mwB) zoC(o=aZvD5p|B2mbn^~p=RpP5BSkjZMrk2U7PPRS!Vq4@W)I)qx=vp1K^G4a%of3d z6eTs1#2$OM)v=5DP6B))@RW;1{MoEo?m{L6s> zo^3ynUqF&siOuGT55(g*?(aa~XrQ!1FmEpjbRqbzU0!m-W)e4gCD@0DXQ_PWp8dAb z@ryT0%JT}YJF!46wv4C)BJM}6P6&f9GPx3A$RfO~J1O;=Sms9Z#T)=kDtlhG3%E$3 zMOfAWCofBgrL5W(ZzrBoLD;xSo(<{>*d;3O+{W?}Ngde?zq&nI-}9RV3SjhLs8E0& zrgtBh07NVWix?ta%XhyKcO9XUlQs}modUQTQp7@|QN~hv$(lQ8xW*C!M71cwg%vb;-DESXj^ zx*2_Ap))sK4-OgzKRg}J1wv>QK^2dP7yNw#rX6}4rNwUnD5+7wR&Z_16g22pxdj9n zd)(``4%R#%pqrEKW%N^|m@zY=0F~4hk29JfVkSag7EYxe#A)sUP6EPcfB8l@r7muH z6IcOE)s<%9pU?<`-3*r8|14D2|X)-fKp0=l>iGK;e`$QG@giw#l|nBtVsA2 zYTcx~oMrD1@ac)fxZDLqqz~z8A_w!kdd?d(SshG6O1G(@GS*9Yd!+6dh04H!AU(+I zAZpw3x%Zs^6%1bzi4o$36>=Yu0^R{APTbwWY-u5K>q%U0g41$kui-pqtPo@F*1Y}$ zea}!pN4O6gaw`xaZrB1;aIgI2o8GqGD?7{s6!d(HwD$euz^d~(WdJ|{za-aCY^3lr z%hv$}cte=U|KeVFLx;Th{MG|coj1NjEi0yrG(pTtIVLn9Wo^TRKN(u?f^0> zBd$l$_jQn{zXNC!E&05#3 z&jYGK#Ak`?i$b2GZV0uB%MamRvU^_Jqb{dms~-TU>+;4k59P16q+Z6NlX=4XhpK`N zYg>Z`MZ@|7LI}g%re=M8M!__ny%HNEUSu)7=0X|)ISq^(O$q2GHXM{RU_=GAkB=5yH~5vgaPnB&j7TBUCA( z+U|uGK!7cDNXv01a)Cm4bL9~i3gfZ0PL#H4@EFD16i5jnI)8}B?dQ|w(H^m4j{W;> z=JgLb0?*%OUg={Z)C*JV2Qz>l3Lo(`th-@fxjN9f{ejbbwEjf)W(c;*h>J>gKH^0P zwGk+Q4+j1ghVHusWu|NNU%ud&u=T_(lV>Xsh{CxUp&!ZxR|iwhVCvFg;Tu{Qb`7x$ z6vce5l%icT5J_C~lXVnKf35>sbe4{98k>HBzL@U{CJ837gyiUzz5oj%2P}Cej%0S$ z@*$83qYp74yk3R#siRhM076htQetZnJXbv%Th3`j-*|nxDy>7dCPN=!M|cn;N?Tkv zn>8w$X9hAvm`aj6Z(PEN&LO;5L6A292;oESkk`oSWJl#AX`lcR%U|#U0cJ8yBunKb zDQVRja=hjN=$WV_5GSy5TOs4+{S`TcDg;zDcVYx!w2fJ_h2blhPOwwLhcTcH%+0&0^wC)jhv#*r*!cnR znf=bJLX4~PlM?qzcPYTm(D2=Z?~j`_4y!WXR!Pi1W(r>ITD1l0!2~R#kv>E`F+FwF z%Z&WNr}OcJ31@(cbknnD9L;3gN9QnQFqt|HO4uU8rvm`YXqY>5)`IcKA7MF}E6Zcb z(pRJQ_dayy3%b+&VZAbNAO^z+1DF*NICoZCK_AqpZ97hprmmE*#oUfw25bE$){AM+ z;X!^2V>F8zwz@i(Dc6%t*y2wP#x^muKIWamcWW8J`(E-u3XYQ+^nUQnqFDz|!?vCGdg8XCTvFL@2V9=u}BK0EJaoC$MeX>x+tWiMBKgiu2Hy zANLCO_e!X!eQ9L$v|m$V#Sx*V7JD{elLu4G3Y2n~8I>Lqd?j9`Wydw%GD0GgjfkCt z{HS_}{l$bU7i1mj^4{&vSm?l}OUnQHzBuTVRPfHP9@HR1yK>wp!fOXwxF#2F2}w>$ zsW;D`%LxNedaxN=64YpYU|R)Cc8O^&XpP0+x48h#0u0G6cl|v=wRToCIJN;0;@Vs; z=Iv=b3Xd4Qo|8>J2=iXe!|21kN^OX za*UK4p8!=t5h!5U(m^K~R`DU`MhNH!W`z|oHt@>;RQLcwZqty{>6sO_*hs_jmKOll z+<)01s~3b|h-#)TcXz~BCIjOGG9=FtbpsrLBna2(Wr4To0&tSaZJ7y?eD;GzuVP`< zmXi0n2&>iC6X4){#_X=0`b`i&U{Z;I#fsCMi^%n`_>+8hGtAfH3;`)r2AVJd=nYHv zkDEkmkjg@og1KlI$ESLOr4XSy7TU5JmjyWR$@8<3JFRT-J5`6}ZnaqGq$I@*I(0U$ 
z^mtzNlR$S*qO4$N?@nR`!aN-toZjBV#`N_CWgtT`PP8U)y2P9Zq1sF?;y_o@3kXYL ztFTk0JVp>Ab5<_C17K|42}^Zv>W2V;Fr4g?Me0X*>m)CxNWacRYu5v_!V-2c$suDP z@W2c*CbI%}X!jjl}r7i*%(v|I?)x0wk1=bTMIufJJ zhgG+-fiHnc$zB)FWsue)*<{#GNCL|^fCqxj13ax{0?~a}b<39RMc-Q+yO^wt$JiJ! z&g$9%2#polbD<`8t0YDZ*ghnQ87O5~KK%Tp@H^S)znYsY9MvCcQ0ch;L8jOLA!H*M z3W7O|+e?|>Wlbc-iPCz(#o)%XCRaHvDRmJ*HpO)d3?vNky*lF5xo*a%-P6gM&t6yY z5*Y*xR4fF*v>Lg33!)m}l0|+B!!0A45I_UsBMwrX>)hp=v*O17!H&hnab=BdcHIbUybi|76j5EqX%Y* zYbn*P4ZSJ=K!E(#+SIzd^13@Tl&|wlOY+TF3kXFgpg#TFse&tjkYVD-KT$xjln|~i zd%aXKD@wARksQFbS8%p5UrP!anvn5T`;>$(&UlXpXJh&X4JT!1SCic^gp&& z+Xguz1lMw5kmT@oK5#7%3$_nSh!!1qyRl>FwqPR}3`3Eg;O0 z@%)CX*k%M*CO!_et-5vPsU9~boO|kgMacZ&gucQ)pq6yHTwB91Q0Vr>k{n6@>oXm% z64JC9olCJEYL*L=r?8MoG(=dZ_h6*o&`t;zmQm5Bs`g2^+6p`BZBK-e05pL}5kcxt zJx9^o8$bkple3A*(%%~ey%NtICIjyXNuEibTwis`0s{wwPzGVXl{ToOJ~QI(Wvu0a zFdK8kdJ7<_hjkOjT9ue%rzQYVbfa|sV9RMBpJ5+)T@Tbq8upO@2H`|?sv|C2CEGe82#D+u6?$ZFG)^8}jXZ1+j6Nlf8}g(AU`5mkzy2sXOAxr9F9E X&+FC3YT^A1{ZqZHrIfFD!~cH(b|Z=* literal 0 HcmV?d00001 diff --git a/docs/source/serving/deploying_with_docker.rst b/docs/source/serving/deploying_with_docker.rst index 14d94b09e9b9c..56f0020a1011a 100644 --- a/docs/source/serving/deploying_with_docker.rst +++ b/docs/source/serving/deploying_with_docker.rst @@ -37,6 +37,29 @@ You can build and run vLLM from source via the provided `Dockerfile `__ +- Available GPU resources in your cluster +- S3 with the model which will be deployed + +Installing the chart +-------------------- + +To install the chart with the release name ``test-vllm``: + +.. code-block:: console + + helm upgrade --install --create-namespace --namespace=ns-vllm test-vllm . -f values.yaml --set secrets.s3endpoint=$ACCESS_POINT --set secrets.s3bucketname=$BUCKET --set secrets.s3accesskeyid=$ACCESS_KEY --set secrets.s3accesskey=$SECRET_KEY + +Uninstalling the Chart +---------------------- + +To uninstall the ``test-vllm`` deployment: + +.. code-block:: console + + helm uninstall test-vllm --namespace=ns-vllm + +The command removes all the Kubernetes components associated with the +chart **including persistent volumes** and deletes the release. + +Architecture +------------ + +.. image:: architecture_helm_deployment.png + +Values +------ + +.. 
+   :widths: 25 25 25 25
+   :header-rows: 1
+
+   * - Key
+     - Type
+     - Default
+     - Description
+   * - autoscaling
+     - object
+     - {"enabled":false,"maxReplicas":100,"minReplicas":1,"targetCPUUtilizationPercentage":80}
+     - Autoscaling configuration
+   * - autoscaling.enabled
+     - bool
+     - false
+     - Enable autoscaling
+   * - autoscaling.maxReplicas
+     - int
+     - 100
+     - Maximum replicas
+   * - autoscaling.minReplicas
+     - int
+     - 1
+     - Minimum replicas
+   * - autoscaling.targetCPUUtilizationPercentage
+     - int
+     - 80
+     - Target CPU utilization for autoscaling
+   * - configs
+     - object
+     - {}
+     - ConfigMap
+   * - containerPort
+     - int
+     - 8000
+     - Container port
+   * - customObjects
+     - list
+     - []
+     - Custom Objects configuration
+   * - deploymentStrategy
+     - object
+     - {}
+     - Deployment strategy configuration
+   * - externalConfigs
+     - list
+     - []
+     - External configuration
+   * - extraContainers
+     - list
+     - []
+     - Additional containers configuration
+   * - extraInit
+     - object
+     - {"pvcStorage":"1Gi","s3modelpath":"relative_s3_model_path/opt-125m", "awsEc2MetadataDisabled": true}
+     - Additional configuration for the init container
+   * - extraInit.pvcStorage
+     - string
+     - "50Gi"
+     - Storage size of the PVC used to hold the model downloaded from S3
+   * - extraInit.s3modelpath
+     - string
+     - "relative_s3_model_path/opt-125m"
+     - Path of the model in the S3 bucket that hosts the model weights and config files
+   * - extraInit.awsEc2MetadataDisabled
+     - boolean
+     - true
+     - Disables the use of the Amazon EC2 instance metadata service
+   * - extraPorts
+     - list
+     - []
+     - Additional ports configuration
+   * - gpuModels
+     - list
+     - ["TYPE_GPU_USED"]
+     - Type of GPU used
+   * - image
+     - object
+     - {"command":["vllm","serve","/data/","--served-model-name","opt-125m","--host","0.0.0.0","--port","8000"],"repository":"vllm/vllm-openai","tag":"latest"}
+     - Image configuration
+   * - image.command
+     - list
+     - ["vllm","serve","/data/","--served-model-name","opt-125m","--host","0.0.0.0","--port","8000"]
+     - Container launch command
+   * - image.repository
+     - string
+     - "vllm/vllm-openai"
+     - Image repository
+   * - image.tag
+     - string
+     - "latest"
+     - Image tag
+   * - livenessProbe
+     - object
+     - {"failureThreshold":3,"httpGet":{"path":"/health","port":8000},"initialDelaySeconds":15,"periodSeconds":10}
+     - Liveness probe configuration
+   * - livenessProbe.failureThreshold
+     - int
+     - 3
+     - Number of consecutive probe failures after which Kubernetes considers the container not alive
+   * - livenessProbe.httpGet
+     - object
+     - {"path":"/health","port":8000}
+     - Configuration of the kubelet HTTP request to the server
+   * - livenessProbe.httpGet.path
+     - string
+     - "/health"
+     - Path to access on the HTTP server
+   * - livenessProbe.httpGet.port
+     - int
+     - 8000
+     - Name or number of the port to access on the container, on which the server is listening
+   * - livenessProbe.initialDelaySeconds
+     - int
+     - 15
+     - Number of seconds after the container has started before the liveness probe is initiated
+   * - livenessProbe.periodSeconds
+     - int
+     - 10
+     - How often (in seconds) to perform the liveness probe
+   * - maxUnavailablePodDisruptionBudget
+     - string
+     - ""
+     - Disruption Budget configuration
+   * - readinessProbe
+     - object
+     - {"failureThreshold":3,"httpGet":{"path":"/health","port":8000},"initialDelaySeconds":5,"periodSeconds":5}
+     - Readiness probe configuration
+   * - readinessProbe.failureThreshold
+     - int
+     - 3
+     - Number of consecutive probe failures after which Kubernetes considers the container not ready
+   * - readinessProbe.httpGet
+     - object
+     - {"path":"/health","port":8000}
+     - Configuration of the kubelet HTTP request to the server
+   * - readinessProbe.httpGet.path
+     - string
+     - "/health"
+     - Path to access on the HTTP server
+   * - readinessProbe.httpGet.port
+     - int
+     - 8000
+     - Name or number of the port to access on the container, on which the server is listening
+   * - readinessProbe.initialDelaySeconds
+     - int
+     - 5
+     - Number of seconds after the container has started before the readiness probe is initiated
+   * - readinessProbe.periodSeconds
+     - int
+     - 5
+     - How often (in seconds) to perform the readiness probe
+   * - replicaCount
+     - int
+     - 1
+     - Number of replicas
+   * - resources
+     - object
+     - {"limits":{"cpu":4,"memory":"16Gi","nvidia.com/gpu":1},"requests":{"cpu":4,"memory":"16Gi","nvidia.com/gpu":1}}
+     - Resource configuration
+   * - resources.limits."nvidia.com/gpu"
+     - int
+     - 1
+     - Number of GPUs used
+   * - resources.limits.cpu
+     - int
+     - 4
+     - Number of CPUs
+   * - resources.limits.memory
+     - string
+     - "16Gi"
+     - Memory limit
+   * - resources.requests."nvidia.com/gpu"
+     - int
+     - 1
+     - Number of GPUs used
+   * - resources.requests.cpu
+     - int
+     - 4
+     - Number of CPUs
+   * - resources.requests.memory
+     - string
+     - "16Gi"
+     - Memory request
+   * - secrets
+     - object
+     - {}
+     - Secrets configuration
+   * - serviceName
+     - string
+     -
+     - Service name
+   * - servicePort
+     - int
+     - 80
+     - Service port
+   * - labels.environment
+     - string
+     - test
+     - Environment name
+   * - labels.release
+     - string
+     - test
+     - Release name
diff --git a/docs/source/serving/deploying_with_k8s.rst b/docs/source/serving/deploying_with_k8s.rst
index 7dc076dc709df..cc3606f0df851 100644
--- a/docs/source/serving/deploying_with_k8s.rst
+++ b/docs/source/serving/deploying_with_k8s.rst
@@ -162,7 +162,7 @@ To test the deployment, run the following ``curl`` command:
   curl http://mistral-7b.default.svc.cluster.local/v1/completions \
        -H "Content-Type: application/json" \
        -d '{
-        "model": "facebook/opt-125m",
+        "model": "mistralai/Mistral-7B-Instruct-v0.3",
         "prompt": "San Francisco is a",
         "max_tokens": 7,
         "temperature": 0
@@ -172,4 +172,4 @@ If the service is correctly deployed, you should receive a response from the vLL
Conclusion
----------
-Deploying vLLM with Kubernetes allows for efficient scaling and management of ML models leveraging GPU resources. By following the steps outlined above, you should be able to set up and test a vLLM deployment within your Kubernetes cluster. If you encounter any issues or have suggestions, please feel free to contribute to the documentation.
\ No newline at end of file
+Deploying vLLM with Kubernetes allows for efficient scaling and management of ML models leveraging GPU resources. By following the steps outlined above, you should be able to set up and test a vLLM deployment within your Kubernetes cluster. If you encounter any issues or have suggestions, please feel free to contribute to the documentation.
diff --git a/docs/source/serving/deploying_with_kubeai.rst b/docs/source/serving/deploying_with_kubeai.rst
new file mode 100644
index 0000000000000..ec3c065320fd9
--- /dev/null
+++ b/docs/source/serving/deploying_with_kubeai.rst
@@ -0,0 +1,17 @@
+.. _deploying_with_kubeai:
+
+Deploying with KubeAI
+=====================
+
+`KubeAI `_ is a Kubernetes operator that enables you to deploy and manage AI models on Kubernetes. It provides a simple and scalable way to deploy vLLM in production.
+Functionality such as scale-from-zero, load-based autoscaling, model caching, and much more is provided out of the box with zero external dependencies.
+
+
+Please see the Installation Guides for environment-specific instructions:
+
+* `Any Kubernetes Cluster `_
+* `EKS `_
+* `GKE `_
+
+Once you have KubeAI installed, you can
+`configure text generation models `_
+using vLLM.
\ No newline at end of file
diff --git a/docs/source/serving/distributed_serving.rst b/docs/source/serving/distributed_serving.rst
index 4d57206e53a05..b24ba53e59694 100644
--- a/docs/source/serving/distributed_serving.rst
+++ b/docs/source/serving/distributed_serving.rst
@@ -54,7 +54,7 @@ Multi-Node Inference and Serving

If a single node does not have enough GPUs to hold the model, you can run the model using multiple nodes. It is important to make sure the execution environment is the same on all nodes, including the model path, the Python environment. The recommended way is to use docker images to ensure the same environment, and hide the heterogeneity of the host machines via mapping them into the same docker configuration.

-The first step, is to start containers and organize them into a cluster. We have provided a helper `script `_ to start the cluster.
+The first step is to start containers and organize them into a cluster. We have provided a helper `script `_ to start the cluster.
+Please note, this script launches docker without the administrative privileges that would be required to access GPU performance counters when running profiling and tracing tools. For that purpose, the script can add ``CAP_SYS_ADMIN`` to the docker container by using the ``--cap-add`` option in the docker run command.

Pick a node as the head node, and run the following command:
diff --git a/docs/source/serving/integrations.rst b/docs/source/serving/integrations.rst
index f39997e0e44d9..0dd505a739863 100644
--- a/docs/source/serving/integrations.rst
+++ b/docs/source/serving/integrations.rst
@@ -6,6 +6,7 @@ Integrations

   run_on_sky
   deploying_with_kserve
+  deploying_with_kubeai
   deploying_with_triton
   deploying_with_bentoml
   deploying_with_cerebrium
diff --git a/docs/source/serving/openai_compatible_server.md b/docs/source/serving/openai_compatible_server.md
index c39cef85897ed..1bc8d32d2d161 100644
--- a/docs/source/serving/openai_compatible_server.md
+++ b/docs/source/serving/openai_compatible_server.md
@@ -1,13 +1,13 @@
# OpenAI Compatible Server

-vLLM provides an HTTP server that implements OpenAI's [Completions](https://platform.openai.com/docs/api-reference/completions) and [Chat](https://platform.openai.com/docs/api-reference/chat) API.
+vLLM provides an HTTP server that implements OpenAI's [Completions](https://platform.openai.com/docs/api-reference/completions) and [Chat](https://platform.openai.com/docs/api-reference/chat) API, and more!

-You can start the server using Python, or using [Docker](deploying_with_docker.rst):
+You can start the server via the [`vllm serve`](#vllm-serve) command, or through [Docker](deploying_with_docker.rst):
```bash
vllm serve NousResearch/Meta-Llama-3-8B-Instruct --dtype auto --api-key token-abc123
```

-To call the server, you can use the official OpenAI Python client library, or any other HTTP client.
+To call the server, you can use the [official OpenAI Python client](https://github.com/openai/openai-python), or any other HTTP client.
```python
from openai import OpenAI
client = OpenAI(
@@ -25,166 +25,71 @@ completion = client.chat.completions.create(
print(completion.choices[0].message)
```

-## API Reference
+## Supported APIs

We currently support the following OpenAI APIs:

-- [Completions API](https://platform.openai.com/docs/api-reference/completions)
+- [Completions API](#completions-api) (`/v1/completions`)
+  - Only applicable to [text generation models](../models/generative_models.rst) (`--task generate`).
  - *Note: `suffix` parameter is not supported.*
-- [Chat Completions API](https://platform.openai.com/docs/api-reference/chat)
-  - [Vision](https://platform.openai.com/docs/guides/vision)-related parameters are supported; see [Using VLMs](../models/vlm.rst).
-  - *Note: `image_url.detail` parameter is not supported.*
-  - We also support `audio_url` content type for audio files.
-  - Refer to [vllm.entrypoints.chat_utils](https://github.com/vllm-project/vllm/tree/main/vllm/entrypoints/chat_utils.py) for the exact schema.
-  - *TODO: Support `input_audio` content type as defined [here](https://github.com/openai/openai-python/blob/v1.52.2/src/openai/types/chat/chat_completion_content_part_input_audio_param.py).*
+- [Chat Completions API](#chat-api) (`/v1/chat/completions`)
+  - Only applicable to [text generation models](../models/generative_models.rst) (`--task generate`) with a [chat template](#chat-template).
  - *Note: `parallel_tool_calls` and `user` parameters are ignored.*
-- [Embeddings API](https://platform.openai.com/docs/api-reference/embeddings)
-  - Instead of `inputs`, you can pass in a list of `messages` (same schema as Chat Completions API),
-    which will be treated as a single prompt to the model according to its chat template.
-  - This enables multi-modal inputs to be passed to embedding models, see [Using VLMs](../models/vlm.rst).
-  - *Note: You should run `vllm serve` with `--task embedding` to ensure that the model is being run in embedding mode.*
-
-## Score API for Cross Encoder Models
+- [Embeddings API](#embeddings-api) (`/v1/embeddings`)
+  - Only applicable to [embedding models](../models/pooling_models.rst) (`--task embed`).

-vLLM supports *cross encoders models* at the **/v1/score** endpoint, which is not an OpenAI API standard endpoint. You can find the documentation for these kind of models at [sbert.net](https://www.sbert.net/docs/package_reference/cross_encoder/cross_encoder.html).
+In addition, we have the following custom APIs:

-A ***Cross Encoder*** takes exactly two sentences / texts as input and either predicts a score or label for this sentence pair. It can for example predict the similarity of the sentence pair on a scale of 0 … 1.
+- [Tokenizer API](#tokenizer-api) (`/tokenize`, `/detokenize`)
+  - Applicable to any model with a tokenizer.
+- [Score API](#score-api) (`/score`)
+  - Only applicable to [cross-encoder models](../models/pooling_models.rst) (`--task score`).

-### Example of usage for a pair of a string and a list of texts
+(chat-template)=
+## Chat Template

-In this case, the model will compare the first given text to each of the texts containing the list.
+In order for the language model to support chat protocol, vLLM requires the model to include
+a chat template in its tokenizer configuration. The chat template is a Jinja2 template that
+specifies how roles, messages, and other chat-specific tokens are encoded in the input.
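+
+As a rough illustration (a sketch that uses the `transformers` library directly, outside of vLLM; the exact
+rendered string depends entirely on the model's own template), applying a chat template turns a list of
+messages into the single prompt string the model was trained on:
+
+```python
+from transformers import AutoTokenizer
+
+tokenizer = AutoTokenizer.from_pretrained("NousResearch/Meta-Llama-3-8B-Instruct")
+messages = [{"role": "user", "content": "What is the capital of France?"}]
+
+# Render the messages through the model's Jinja2 chat template,
+# appending the tokens that cue the assistant to respond.
+prompt = tokenizer.apply_chat_template(
+    messages, tokenize=False, add_generation_prompt=True
+)
+print(prompt)  # a single string containing role markers and special tokens
+```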
-```bash
-curl -X 'POST' \
-  'http://127.0.0.1:8000/v1/score' \
-  -H 'accept: application/json' \
-  -H 'Content-Type: application/json' \
-  -d '{
-  "model": "BAAI/bge-reranker-v2-m3",
-  "text_1": "What is the capital of France?",
-  "text_2": [
-    "The capital of Brazil is Brasilia.",
-    "The capital of France is Paris."
-  ]
-}'
-```
+An example chat template for `NousResearch/Meta-Llama-3-8B-Instruct` can be found [here](https://github.com/meta-llama/llama3?tab=readme-ov-file#instruction-tuned-models)

-Response:
+Some models do not provide a chat template even though they are instruction/chat fine-tuned. For those models,
+you can manually specify their chat template in the `--chat-template` parameter with the file path to the chat
+template, or the template in string form. Without a chat template, the server will not be able to process chat
+and all chat requests will error.

```bash
-{
-  "id": "score-request-id",
-  "object": "list",
-  "created": 693570,
-  "model": "BAAI/bge-reranker-v2-m3",
-  "data": [
-    {
-      "index": 0,
-      "object": "score",
-      "score": [
-        0.001094818115234375
-      ]
-    },
-    {
-      "index": 1,
-      "object": "score",
-      "score": [
-        1
-      ]
-    }
-  ],
-  "usage": {}
-}
+vllm serve --chat-template ./path-to-chat-template.jinja
```

-### Example of usage for a pair of two lists of texts
-
-In this case, the model will compare the one by one, making pairs by same index correspondent in each list.
+The vLLM community provides a set of chat templates for popular models. You can find them in the examples
+directory [here](https://github.com/vllm-project/vllm/tree/main/examples/)

-```bash
-curl -X 'POST' \
-  'http://127.0.0.1:8000/v1/score' \
-  -H 'accept: application/json' \
-  -H 'Content-Type: application/json' \
-  -d '{
-  "model": "BAAI/bge-reranker-v2-m3",
-  "encoding_format": "float",
-  "text_1": [
-    "What is the capital of Brazil?",
-    "What is the capital of France?"
-  ],
-  "text_2": [
-    "The capital of Brazil is Brasilia.",
-    "The capital of France is Paris."
+With the inclusion of multi-modal chat APIs, the OpenAI spec now accepts chat messages in a new format which specifies
+both a `type` and a `text` field. An example is provided below:
+```python
+completion = client.chat.completions.create(
+  model="NousResearch/Meta-Llama-3-8B-Instruct",
+  messages=[
+    {"role": "user", "content": [{"type": "text", "text": "Classify this sentiment: vLLM is wonderful!"}]}
   ]
-}'
-```
-
-Response:
-
-```bash
-{
-  "id": "score-request-id",
-  "object": "list",
-  "created": 693447,
-  "model": "BAAI/bge-reranker-v2-m3",
-  "data": [
-    {
-      "index": 0,
-      "object": "score",
-      "score": [
-        1
-      ]
-    },
-    {
-      "index": 1,
-      "object": "score",
-      "score": [
-        1
-      ]
-    }
-  ],
-  "usage": {}
-}
+)
```

-### Example of usage for a pair of two strings
-
-In this case, the model will compare the strings of texts.
-
-```bash
-curl -X 'POST' \
-  'http://127.0.0.1:8000/v1/score' \
-  -H 'accept: application/json' \
-  -H 'Content-Type: application/json' \
-  -d '{
-  "model": "BAAI/bge-reranker-v2-m3",
-  "encoding_format": "float",
-  "text_1": "What is the capital of France?",
-  "text_2": "The capital of France is Paris."
-}'
-```
+Most chat templates for LLMs expect the `content` field to be a string, but there are some newer models like
+`meta-llama/Llama-Guard-3-1B` that expect the content to be formatted according to the OpenAI schema in the
+request.
vLLM provides best-effort support to detect this automatically, which is logged as a string like +*"Detected the chat template content format to be..."*, and internally converts incoming requests to match +the detected format, which can be one of: -Response: +- `"string"`: A string. + - Example: `"Hello world"` +- `"openai"`: A list of dictionaries, similar to OpenAI schema. + - Example: `[{"type": "text", "text": "Hello world!"}]` -```bash -{ - "id": "score-request-id", - "object": "list", - "created": 693447, - "model": "BAAI/bge-reranker-v2-m3", - "data": [ - { - "index": 0, - "object": "score", - "score": [ - 1 - ] - } - ], - "usage": {} -} -``` +If the result is not what you expect, you can set the `--chat-template-content-format` CLI argument +to override which format to use. ## Extra Parameters @@ -204,7 +109,7 @@ completion = client.chat.completions.create( ) ``` -### Extra HTTP Headers +## Extra HTTP Headers Only `X-Request-Id` HTTP request header is supported for now. @@ -230,7 +135,53 @@ completion = client.completions.create( print(completion._request_id) ``` -### Extra Parameters for Completions API +## CLI Reference + +(vllm-serve)= +### `vllm serve` + +The `vllm serve` command is used to launch the OpenAI-compatible server. + +```{argparse} +:module: vllm.entrypoints.openai.cli_args +:func: create_parser_for_docs +:prog: vllm serve +``` + +#### Configuration file + +You can load CLI arguments via a [YAML](https://yaml.org/) config file. +The argument names must be the long form of those outlined [above](#vllm-serve). + +For example: + +```yaml +# config.yaml + +host: "127.0.0.1" +port: 6379 +uvicorn-log-level: "info" +``` + +To use the above config file: + +```bash +$ vllm serve SOME_MODEL --config config.yaml +``` + +```{note} +In case an argument is supplied simultaneously using command line and the config file, the value from the command line will take precedence. +The order of priorities is `command line > config file values > defaults`. +``` + +## API Reference + +(completions-api)= +### Completions API + +Refer to [OpenAI's API reference](https://platform.openai.com/docs/api-reference/completions) for more details. + +#### Extra parameters The following [sampling parameters (click through to see documentation)](../dev/sampling_params.rst) are supported. @@ -248,7 +199,17 @@ The following extra parameters are supported: :end-before: end-completion-extra-params ``` -### Extra Parameters for Chat Completions API +(chat-api)= +### Chat Completions API + +Refer to [OpenAI's API reference](https://platform.openai.com/docs/api-reference/chat) for more details. + +We support both [Vision](https://platform.openai.com/docs/guides/vision)- and +[Audio](https://platform.openai.com/docs/guides/audio?audio-generation-quickstart-example=audio-in)-related parameters; +see our [Multimodal Inputs](../usage/multimodal_inputs.rst) guide for more information. +- *Note: `image_url.detail` parameter is not supported.* + +#### Extra parameters The following [sampling parameters (click through to see documentation)](../dev/sampling_params.rst) are supported. @@ -266,7 +227,19 @@ The following extra parameters are supported: :end-before: end-chat-completion-extra-params ``` -### Extra Parameters for Embeddings API +(embeddings-api)= +### Embeddings API + +Refer to [OpenAI's API reference](https://platform.openai.com/docs/api-reference/embeddings) for more details. 
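+
+For example, assuming the server is running an embedding model (started with `--task embed`; the model name
+below is only an illustration), you can request embeddings with the official OpenAI Python client:
+
+```python
+from openai import OpenAI
+
+client = OpenAI(base_url="http://localhost:8000/v1", api_key="token-abc123")
+
+responses = client.embeddings.create(
+    model="intfloat/e5-mistral-7b-instruct",
+    input=["Hello my name is", "The capital of France is Paris."],
+)
+for data in responses.data:
+    print(len(data.embedding))  # dimensionality of each embedding vector
+```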
+ +If the model has a [chat template](#chat-template), you can replace `inputs` with a list of `messages` (same schema as [Chat Completions API](#chat-api)) +which will be treated as a single prompt to the model. + +```{tip} +This enables multi-modal inputs to be passed to embedding models, see [this page](../usage/multimodal_inputs.rst) for details. +``` + +#### Extra parameters The following [pooling parameters (click through to see documentation)](../dev/pooling_params.rst) are supported. @@ -276,7 +249,7 @@ The following [pooling parameters (click through to see documentation)](../dev/p :end-before: end-embedding-pooling-params ``` -The following extra parameters are supported: +The following extra parameters are supported by default: ```{literalinclude} ../../../vllm/entrypoints/openai/protocol.py :language: python @@ -284,297 +257,179 @@ The following extra parameters are supported: :end-before: end-embedding-extra-params ``` -## Chat Template - -In order for the language model to support chat protocol, vLLM requires the model to include -a chat template in its tokenizer configuration. The chat template is a Jinja2 template that -specifies how are roles, messages, and other chat-specific tokens are encoded in the input. - -An example chat template for `NousResearch/Meta-Llama-3-8B-Instruct` can be found [here](https://github.com/meta-llama/llama3?tab=readme-ov-file#instruction-tuned-models) - -Some models do not provide a chat template even though they are instruction/chat fine-tuned. For those model, -you can manually specify their chat template in the `--chat-template` parameter with the file path to the chat -template, or the template in string form. Without a chat template, the server will not be able to process chat -and all chat requests will error. - -```bash -vllm serve --chat-template ./path-to-chat-template.jinja -``` - -vLLM community provides a set of chat templates for popular models. You can find them in the examples -directory [here](https://github.com/vllm-project/vllm/tree/main/examples/) +For chat-like input (i.e. if `messages` is passed), these extra parameters are supported instead: -With the inclusion of multi-modal chat APIs, the OpenAI spec now accepts chat messages in a new format which specifies -both a `type` and a `text` field. An example is provided below: -```python -completion = client.chat.completions.create( - model="NousResearch/Meta-Llama-3-8B-Instruct", - messages=[ - {"role": "user", "content": [{"type": "text", "text": "Classify this sentiment: vLLM is wonderful!"}]} - ] -) +```{literalinclude} ../../../vllm/entrypoints/openai/protocol.py +:language: python +:start-after: begin-chat-embedding-extra-params +:end-before: end-chat-embedding-extra-params ``` -Most chat templates for LLMs expect the `content` field to be a string, but there are some newer models like -`meta-llama/Llama-Guard-3-1B` that expect the content to be formatted according to the OpenAI schema in the -request. vLLM provides best-effort support to detect this automatically, which is logged as a string like -*"Detected the chat template content format to be..."*, and internally converts incoming requests to match -the detected format, which can be one of: - -- `"string"`: A string. - - Example: `"Hello world"` -- `"openai"`: A list of dictionaries, similar to OpenAI schema. - - Example: `[{"type": "text", "text": "Hello world!"}]` - -If the result is not what you expect, you can set the `--chat-template-content-format` CLI argument -to override which format to use. 
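+
+Returning to the chat-style `messages` input described above, a plain HTTP request is the easiest
+illustration. This is a hedged sketch: it assumes a multimodal embedding model is being served with
+`--task embed` and that the server accepts `messages` on this endpoint as documented; the model name is
+illustrative only:
+
+```python
+import requests
+
+response = requests.post(
+    "http://localhost:8000/v1/embeddings",
+    json={
+        "model": "TIGER-Lab/VLM2Vec-Full",
+        # A list of chat messages is treated as a single prompt via the chat template
+        "messages": [
+            {"role": "user", "content": [{"type": "text", "text": "Represent this sentence."}]}
+        ],
+        "encoding_format": "float",
+    },
+)
+response.raise_for_status()
+print(response.json()["data"][0]["embedding"][:8])  # first few dimensions
+```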
+(tokenizer-api)=
+### Tokenizer API

-## Command line arguments for the server
+The Tokenizer API is a simple wrapper over [HuggingFace-style tokenizers](https://huggingface.co/docs/transformers/en/main_classes/tokenizer).
+It consists of two endpoints:

-```{argparse}
-:module: vllm.entrypoints.openai.cli_args
-:func: create_parser_for_docs
-:prog: vllm serve
-```
+- `/tokenize` corresponds to calling `tokenizer.encode()`.
+- `/detokenize` corresponds to calling `tokenizer.decode()`.

+(score-api)=
+### Score API

-### Config file
+The Score API applies a cross-encoder model to predict scores for sentence pairs.
+Usually, the score for a sentence pair refers to the similarity between two sentences, on a scale of 0 to 1.

-The `serve` module can also accept arguments from a config file in
-`yaml` format. The arguments in the yaml must be specified using the
-long form of the argument outlined [here](https://docs.vllm.ai/en/latest/serving/openai_compatible_server.html#command-line-arguments-for-the-server):
+You can find the documentation for these kinds of models at [sbert.net](https://www.sbert.net/docs/package_reference/cross_encoder/cross_encoder.html).

-For example:
+#### Single inference

-```yaml
-# config.yaml
+You can pass a string to both `text_1` and `text_2`, forming a single sentence pair.

-host: "127.0.0.1"
-port: 6379
-uvicorn-log-level: "info"
-```
+Request:

```bash
-$ vllm serve SOME_MODEL --config config.yaml
+curl -X 'POST' \
+  'http://127.0.0.1:8000/score' \
+  -H 'accept: application/json' \
+  -H 'Content-Type: application/json' \
+  -d '{
+  "model": "BAAI/bge-reranker-v2-m3",
+  "encoding_format": "float",
+  "text_1": "What is the capital of France?",
+  "text_2": "The capital of France is Paris."
+}'
```

----
-**NOTE**
-In case an argument is supplied simultaneously using command line and the config file, the value from the commandline will take precedence.
-The order of priorities is `command line > config file values > defaults`.
-
----
-
-## Tool calling in the chat completion API
-vLLM currently supports named function calling, as well as the `auto` and `none` options for the `tool_choice` field in the chat completion API. The `tool_choice` option `required` is **not yet supported** but on the roadmap.
-
-It is the callers responsibility to prompt the model with the tool information, vLLM will not automatically manipulate the prompt.
-Please see below for recommended configuration and chat templates to use when function calling is to be used with the different models.
-
-
-### Named Function Calling
-vLLM supports named function calling in the chat completion API by default. It does so using Outlines, so this is
-enabled by default, and will work with any supported model. You are guaranteed a validly-parsable function call - not a
-high-quality one.
-
-vLLM will use guided decoding to ensure the response matches the tool parameter object defined by the JSON schema in the `tools` parameter.
-
-To use a named function, you need to define the functions in the `tools` parameter of the chat completion request, and
-specify the `name` of one of the tools in the `tool_choice` parameter of the chat completion request.
-
-
-### Automatic Function Calling
-To enable this feature, you should set the following flags:
-* `--enable-auto-tool-choice` -- **mandatory** Auto tool choice. tells vLLM that you want to enable the model to generate its own tool calls when it
-deems appropriate.
-* `--tool-call-parser` -- select the tool parser to use (listed below).
Additional tool parsers -will continue to be added in the future, and also can register your own tool parsers in the `--tool-parser-plugin`. -* `--tool-parser-plugin` -- **optional** tool parser plugin used to register user defined tool parsers into vllm, the registered tool parser name can be specified in `--tool-call-parser`. -* `--chat-template` -- **optional** for auto tool choice. the path to the chat template which handles `tool`-role messages and `assistant`-role messages -that contain previously generated tool calls. Hermes, Mistral and Llama models have tool-compatible chat templates in their -`tokenizer_config.json` files, but you can specify a custom template. This argument can be set to `tool_use` if your model has a tool use-specific chat -template configured in the `tokenizer_config.json`. In this case, it will be used per the `transformers` specification. More on this [here](https://huggingface.co/docs/transformers/en/chat_templating#why-do-some-models-have-multiple-templates) -from HuggingFace; and you can find an example of this in a `tokenizer_config.json` [here](https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B/blob/main/tokenizer_config.json) - -If your favorite tool-calling model is not supported, please feel free to contribute a parser & tool use chat template! - - -#### Hermes Models (`hermes`) - -All Nous Research Hermes-series models newer than Hermes 2 Pro should be supported. -* `NousResearch/Hermes-2-Pro-*` -* `NousResearch/Hermes-2-Theta-*` -* `NousResearch/Hermes-3-*` - - -_Note that the Hermes 2 **Theta** models are known to have degraded tool call quality & capabilities due to the merge -step in their creation_. - -Flags: `--tool-call-parser hermes` - - -#### Mistral Models (`mistral`) - -Supported models: -* `mistralai/Mistral-7B-Instruct-v0.3` (confirmed) -* Additional mistral function-calling models are compatible as well. - -Known issues: -1. Mistral 7B struggles to generate parallel tool calls correctly. -2. Mistral's `tokenizer_config.json` chat template requires tool call IDs that are exactly 9 digits, which is -much shorter than what vLLM generates. Since an exception is thrown when this condition -is not met, the following additional chat templates are provided: - -* `examples/tool_chat_template_mistral.jinja` - this is the "official" Mistral chat template, but tweaked so that -it works with vLLM's tool call IDs (provided `tool_call_id` fields are truncated to the last 9 digits) -* `examples/tool_chat_template_mistral_parallel.jinja` - this is a "better" version that adds a tool-use system prompt -when tools are provided, that results in much better reliability when working with parallel tool calling. - - -Recommended flags: `--tool-call-parser mistral --chat-template examples/tool_chat_template_mistral_parallel.jinja` - - -#### Llama Models (`llama3_json`) - -Supported models: -* `meta-llama/Meta-Llama-3.1-8B-Instruct` -* `meta-llama/Meta-Llama-3.1-70B-Instruct` -* `meta-llama/Meta-Llama-3.1-405B-Instruct` -* `meta-llama/Meta-Llama-3.1-405B-Instruct-FP8` - -The tool calling that is supported is the [JSON based tool calling](https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1/#json-based-tool-calling). For [pythonic tool calling](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/text_prompt_format.md#zero-shot-function-calling) in Llama-3.2 models, see the `pythonic` tool parser below. -Other tool calling formats like the built in python tool calling or custom tool calling are not supported. 
- -Known issues: -1. Parallel tool calls are not supported. -2. The model can generate parameters with a wrong format, such as generating - an array serialized as string instead of an array. - -The `tool_chat_template_llama3_json.jinja` file contains the "official" Llama chat template, but tweaked so that -it works better with vLLM. - -Recommended flags: `--tool-call-parser llama3_json --chat-template examples/tool_chat_template_llama3_json.jinja` - -#### IBM Granite - -Supported models: -* `ibm-granite/granite-3.0-8b-instruct` - -Recommended flags: `--tool-call-parser granite --chat-template examples/tool_chat_template_granite.jinja` - -`examples/tool_chat_template_granite.jinja`: this is a modified chat template from the original on Huggingface. Parallel function calls are supported. - -* `ibm-granite/granite-20b-functioncalling` - -Recommended flags: `--tool-call-parser granite-20b-fc --chat-template examples/tool_chat_template_granite_20b_fc.jinja` - -`examples/tool_chat_template_granite_20b_fc.jinja`: this is a modified chat template from the original on Huggingface, which is not vLLM compatible. It blends function description elements from the Hermes template and follows the same system prompt as "Response Generation" mode from [the paper](https://arxiv.org/abs/2407.00121). Parallel function calls are supported. - - -#### InternLM Models (`internlm`) - -Supported models: -* `internlm/internlm2_5-7b-chat` (confirmed) -* Additional internlm2.5 function-calling models are compatible as well - -Known issues: -* Although this implementation also supports InternLM2, the tool call results are not stable when testing with the `internlm/internlm2-chat-7b` model. - -Recommended flags: `--tool-call-parser internlm --chat-template examples/tool_chat_template_internlm2_tool.jinja` - - -#### Jamba Models (`jamba`) -AI21's Jamba-1.5 models are supported. -* `ai21labs/AI21-Jamba-1.5-Mini` -* `ai21labs/AI21-Jamba-1.5-Large` +Response: -Flags: `--tool-call-parser jamba` +```bash +{ + "id": "score-request-id", + "object": "list", + "created": 693447, + "model": "BAAI/bge-reranker-v2-m3", + "data": [ + { + "index": 0, + "object": "score", + "score": 1 + } + ], + "usage": {} +} +``` +#### Batch inference -#### Models with Pythonic Tool Calls (`pythonic`) +You can pass a string to `text_1` and a list to `text_2`, forming multiple sentence pairs +where each pair is built from `text_1` and a string in `text_2`. +The total number of pairs is `len(text_2)`. -A growing number of models output a python list to represent tool calls instead of using JSON. This has the advantage of inherently supporting parallel tool calls and removing ambiguity around the JSON schema required for tool calls. The `pythonic` tool parser can support such models. +Request: -As a concrete example, these models may look up the weather in San Francisco and Seattle by generating: -```python -[get_weather(city='San Francisco', metric='celsius'), get_weather(city='Seattle', metric='celsius')] +```bash +curl -X 'POST' \ + 'http://127.0.0.1:8000/score' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "model": "BAAI/bge-reranker-v2-m3", + "text_1": "What is the capital of France?", + "text_2": [ + "The capital of Brazil is Brasilia.", + "The capital of France is Paris." + ] +}' ``` -Limitations: -* The model must not generate both text and tool calls in the same generation. 
This may not be hard to change for a specific model, but the community currently lacks consensus on which tokens to emit when starting and ending tool calls. (In particular, the Llama 3.2 models emit no such tokens.) -* Llama's smaller models struggle to use tools effectively. - -Example supported models: -* `meta-llama/Llama-3.2-1B-Instruct`\* (use with `examples/tool_chat_template_llama3.2_pythonic.jinja`) -* `meta-llama/Llama-3.2-3B-Instruct`\* (use with `examples/tool_chat_template_llama3.2_pythonic.jinja`) -* `Team-ACE/ToolACE-8B` (use with `examples/tool_chat_template_toolace.jinja`) -* `fixie-ai/ultravox-v0_4-ToolACE-8B` (use with `examples/tool_chat_template_toolace.jinja`) - -Flags: `--tool-call-parser pythonic --chat-template {see_above}` - ---- -**WARNING** -Llama's smaller models frequently fail to emit tool calls in the correct format. Your mileage may vary. +Response: ---- +```bash +{ + "id": "score-request-id", + "object": "list", + "created": 693570, + "model": "BAAI/bge-reranker-v2-m3", + "data": [ + { + "index": 0, + "object": "score", + "score": 0.001094818115234375 + }, + { + "index": 1, + "object": "score", + "score": 1 + } + ], + "usage": {} +} +``` +You can pass a list to both `text_1` and `text_2`, forming multiple sentence pairs +where each pair is built from a string in `text_1` and the corresponding string in `text_2` (similar to `zip()`). +The total number of pairs is `len(text_2)`. -### How to write a tool parser plugin +Request: -A tool parser plugin is a Python file containing one or more ToolParser implementations. You can write a ToolParser similar to the `Hermes2ProToolParser` in vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py. +```bash +curl -X 'POST' \ + 'http://127.0.0.1:8000/score' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "model": "BAAI/bge-reranker-v2-m3", + "encoding_format": "float", + "text_1": [ + "What is the capital of Brazil?", + "What is the capital of France?" + ], + "text_2": [ + "The capital of Brazil is Brasilia.", + "The capital of France is Paris." + ] +}' +``` -Here is a summary of a plugin file: +Response: -```python +```bash +{ + "id": "score-request-id", + "object": "list", + "created": 693447, + "model": "BAAI/bge-reranker-v2-m3", + "data": [ + { + "index": 0, + "object": "score", + "score": 1 + }, + { + "index": 1, + "object": "score", + "score": 1 + } + ], + "usage": {} +} +``` -# import the required packages - -# define a tool parser and register it to vllm -# the name list in register_module can be used -# in --tool-call-parser. you can define as many -# tool parsers as you want here. -@ToolParserManager.register_module(["example"]) -class ExampleToolParser(ToolParser): - def __init__(self, tokenizer: AnyTokenizer): - super().__init__(tokenizer) - - # adjust request. e.g.: set skip special tokens - # to False for tool call output. 
- def adjust_request( - self, request: ChatCompletionRequest) -> ChatCompletionRequest: - return request - - # implement the tool call parse for stream call - def extract_tool_calls_streaming( - self, - previous_text: str, - current_text: str, - delta_text: str, - previous_token_ids: Sequence[int], - current_token_ids: Sequence[int], - delta_token_ids: Sequence[int], - request: ChatCompletionRequest, - ) -> Union[DeltaMessage, None]: - return delta - - # implement the tool parse for non-stream call - def extract_tool_calls( - self, - model_output: str, - request: ChatCompletionRequest, - ) -> ExtractedToolCallInformation: - return ExtractedToolCallInformation(tools_called=False, - tool_calls=[], - content=text) +#### Extra parameters +The following [pooling parameters (click through to see documentation)](../dev/pooling_params.rst) are supported. +```{literalinclude} ../../../vllm/entrypoints/openai/protocol.py +:language: python +:start-after: begin-score-pooling-params +:end-before: end-score-pooling-params ``` -Then you can use this plugin in the command line like this. -``` - --enable-auto-tool-choice \ - --tool-parser-plugin - --tool-call-parser example \ - --chat-template \ -``` +The following extra parameters are supported: +```{literalinclude} ../../../vllm/entrypoints/openai/protocol.py +:language: python +:start-after: begin-score-extra-params +:end-before: end-score-extra-params +``` diff --git a/docs/source/serving/serving_with_llamastack.rst b/docs/source/serving/serving_with_llamastack.rst index 8ef96c4e54369..a2acd7b39f887 100644 --- a/docs/source/serving/serving_with_llamastack.rst +++ b/docs/source/serving/serving_with_llamastack.rst @@ -24,7 +24,7 @@ Then start Llama Stack server pointing to your vLLM server with the following co config: url: http://127.0.0.1:8000 -Please refer to `this guide `_ for more details on this remote vLLM provider. +Please refer to `this guide `_ for more details on this remote vLLM provider. 
Inference via Embedded vLLM
---------------------------
diff --git a/docs/source/serving/compatibility_matrix.rst b/docs/source/usage/compatibility_matrix.rst
similarity index 96%
rename from docs/source/serving/compatibility_matrix.rst
rename to docs/source/usage/compatibility_matrix.rst
index a93632ff36fb8..04dd72b1e3527 100644
--- a/docs/source/serving/compatibility_matrix.rst
+++ b/docs/source/usage/compatibility_matrix.rst
@@ -39,13 +39,13 @@ Feature x Feature
    - :abbr:`prmpt adptr (Prompt Adapter)`
    - :ref:`SD `
    - CUDA graph
-    - :abbr:`emd (Embedding Models)`
+    - :abbr:`pooling (Pooling Models)`
    - :abbr:`enc-dec (Encoder-Decoder Models)`
    - :abbr:`logP (Logprobs)`
    - :abbr:`prmpt logP (Prompt Logprobs)`
    - :abbr:`async output (Async Output Processing)`
    - multi-step
-    - :abbr:`mm (Multimodal)`
+    - :abbr:`mm (Multimodal Inputs)`
    - best-of
    - beam-search
    - :abbr:`guided dec (Guided Decoding)`
@@ -151,7 +151,7 @@ Feature x Feature
    -
    -
    -
-  * - :abbr:`emd (Embedding Models)`
+  * - :abbr:`pooling (Pooling Models)`
    - ✗
    - ✗
    - ✗
@@ -253,7 +253,7 @@ Feature x Feature
    -
    -
    -
-  * - :abbr:`mm (Multimodal)`
+  * - :abbr:`mm (Multimodal Inputs)`
    - ✅
    - `✗ `__
    - `✗ `__
@@ -386,7 +386,7 @@ Feature x Hardware
    - ✅
    - ✗
    - ✅
-  * - :abbr:`emd (Embedding Models)`
+  * - :abbr:`pooling (Pooling Models)`
    - ✅
    - ✅
    - ✅
@@ -402,7 +402,7 @@ Feature x Hardware
    - ✅
    - ✅
    - ✗
-  * - :abbr:`mm (Multimodal)`
+  * - :abbr:`mm (Multimodal Inputs)`
    - ✅
    - ✅
    - ✅
diff --git a/docs/source/usage/disagg_prefill.rst b/docs/source/usage/disagg_prefill.rst
new file mode 100644
index 0000000000000..9fe714b4fd856
--- /dev/null
+++ b/docs/source/usage/disagg_prefill.rst
@@ -0,0 +1,69 @@
+.. _disagg_prefill:
+
+Disaggregated prefilling (experimental)
+=======================================
+
+This page introduces the disaggregated prefilling feature in vLLM. This feature is experimental and subject to change.
+
+Why disaggregated prefilling?
+-----------------------------
+
+Two main reasons:
+
+* **Tuning time-to-first-token (TTFT) and inter-token-latency (ITL) separately**. Disaggregated prefilling puts the prefill and decode phases of LLM inference inside different vLLM instances. This gives you the flexibility to assign different parallel strategies (e.g. ``tp`` and ``pp``) to tune TTFT without affecting ITL, or to tune ITL without affecting TTFT.
+* **Controlling tail ITL**. Without disaggregated prefilling, vLLM may insert some prefill jobs during the decoding of one request. This results in higher tail latency. Disaggregated prefilling helps you solve this issue and control tail ITL. Chunked prefill with a proper chunk size can also achieve the same goal, but in practice it's hard to figure out the correct chunk size value. So disaggregated prefilling is a much more reliable way to control tail ITL.
+
+.. note::
+   Disaggregated prefill DOES NOT improve throughput.
+
+Usage example
+-------------
+
+Please refer to ``examples/disaggregated_prefill.sh`` for example usage of disaggregated prefilling.
+
+
+Benchmarks
+----------
+
+Please refer to ``benchmarks/disagg_benchmarks/`` for disaggregated prefilling benchmarks.
+
+
+Development
+-----------
+
+We implement disaggregated prefilling by running two vLLM instances: one for prefill (the prefill instance) and one for decode (the decode instance). A connector then transfers the prefill KV caches and results from the prefill instance to the decode instance.
+
+All of the disaggregated prefilling implementation lives under ``vllm/distributed/kv_transfer``.
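+
+As a rough mental model for the abstractions listed below, the following sketch (illustrative Python only;
+the class and method names mirror this page's terminology, not vLLM's actual implementation) shows how a
+producer-side ``insert`` and a consumer-side ``drop_select`` interact:
+
+.. code-block:: python
+
+   import torch
+
+   class LookupBuffer:
+       """Toy stand-in: insert() stores a KV cache; drop_select() returns and removes it."""
+
+       def __init__(self):
+           self._entries = {}
+
+       def insert(self, key: str, kv: torch.Tensor) -> None:
+           # Non-blocking in the real system: the producer stores the cache and moves on.
+           self._entries[key] = kv
+
+       def drop_select(self, key: str) -> torch.Tensor:
+           # Blocking in the real system; this toy version assumes the entry already exists.
+           return self._entries.pop(key)
+
+   buf = LookupBuffer()
+   buf.insert("request-0", torch.zeros(2, 16))   # prefill instance produces a KV cache
+   kv = buf.drop_select("request-0")             # decode instance retrieves and drops it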
+
+Key abstractions for disaggregated prefilling:
+
+* **Connector**: Connector allows the **kv consumer** to retrieve the KV caches of a batch of requests from the **kv producer**.
+* **LookupBuffer**: LookupBuffer provides two APIs: ``insert`` KV cache and ``drop_select`` KV cache. The semantics of ``insert`` and ``drop_select`` are similar to SQL, where ``insert`` inserts a KV cache into the buffer, and ``drop_select`` returns the KV cache that matches the given condition and drops it from the buffer.
+* **Pipe**: A single-direction FIFO pipe for tensor transmission. It supports ``send_tensor`` and ``recv_tensor``.
+
+.. note::
+    ``insert`` is a non-blocking operation while ``drop_select`` is a blocking operation.
+
+Here is a figure illustrating how the above 3 abstractions are organized:
+
+.. image:: /assets/usage/disagg_prefill/abstraction.jpg
+  :alt: Disaggregated prefilling abstractions
+
+The workflow of disaggregated prefilling is as follows:
+
+.. image:: /assets/usage/disagg_prefill/overview.jpg
+  :alt: Disaggregated prefilling workflow
+
+The ``buffer`` corresponds to the ``insert`` API in LookupBuffer, and ``drop_select`` corresponds to the ``drop_select`` API in LookupBuffer.
+
+
+Third-party contributions
+-------------------------
+
+Disaggregated prefilling is highly related to infrastructure, so vLLM relies on third-party connectors for production-level disaggregated prefilling (and the vLLM team will actively review and merge new PRs for third-party connectors).
+
+We recommend three implementation approaches:
+
+* **Fully-customized connector**: Implement your own ``Connector`` and call third-party libraries to send and receive KV caches, and more (like editing vLLM's model input to perform customized prefilling, etc.). This approach gives you the most control, but at the risk of being incompatible with future vLLM versions.
+* **Database-like connector**: Implement your own ``LookupBuffer`` and support the ``insert`` and ``drop_select`` APIs just like SQL.
+* **Distributed P2P connector**: Implement your own ``Pipe`` and support the ``send_tensor`` and ``recv_tensor`` APIs, just like `torch.distributed`.
diff --git a/docs/source/models/engine_args.rst b/docs/source/usage/engine_args.rst
similarity index 100%
rename from docs/source/models/engine_args.rst
rename to docs/source/usage/engine_args.rst
diff --git a/docs/source/serving/env_vars.rst b/docs/source/usage/env_vars.rst
similarity index 100%
rename from docs/source/serving/env_vars.rst
rename to docs/source/usage/env_vars.rst
diff --git a/docs/source/serving/faq.rst b/docs/source/usage/faq.rst
similarity index 76%
rename from docs/source/serving/faq.rst
rename to docs/source/usage/faq.rst
index 9e858e612c8bf..d88da32092924 100644
--- a/docs/source/serving/faq.rst
+++ b/docs/source/usage/faq.rst
@@ -1,3 +1,5 @@
+.. _faq:
+
 Frequently Asked Questions
 ===========================
 
@@ -9,7 +11,12 @@ A: Assuming that you're referring to using OpenAI compatible server to serve mul
 
 Q: Which model to use for offline inference embedding?
 
-A: If you want to use an embedding model, try: https://huggingface.co/intfloat/e5-mistral-7b-instruct. Instead models, such as Llama-3-8b, Mistral-7B-Instruct-v0.3, are generation models rather than an embedding model
+A: You can try `e5-mistral-7b-instruct `__ and `BAAI/bge-base-en-v1.5 `__;
+more are listed :ref:`here `.
+
+By extracting hidden states, vLLM can automatically convert text generation models like `Llama-3-8B `__,
+`Mistral-7B-Instruct-v0.3 `__ into embedding models,
+but they are expected to be inferior to models that are specifically trained on embedding tasks.

----------------------------------------

diff --git a/docs/source/models/lora.rst b/docs/source/usage/lora.rst
similarity index 99%
rename from docs/source/models/lora.rst
rename to docs/source/usage/lora.rst
index ef0177eaf2162..c2c6fa2aebfaf 100644
--- a/docs/source/models/lora.rst
+++ b/docs/source/usage/lora.rst
@@ -1,7 +1,7 @@
 .. _lora:
 
-Using LoRA adapters
-===================
+LoRA Adapters
+=============
 
 This document shows you how to use `LoRA adapters `_ with vLLM
 on top of a base model.
diff --git a/docs/source/models/vlm.rst b/docs/source/usage/multimodal_inputs.rst
similarity index 52%
rename from docs/source/models/vlm.rst
rename to docs/source/usage/multimodal_inputs.rst
index bcbe50a25fa09..680382e457cc5 100644
--- a/docs/source/models/vlm.rst
+++ b/docs/source/usage/multimodal_inputs.rst
@@ -1,34 +1,31 @@
-.. _vlm:
+.. _multimodal_inputs:
 
-Using VLMs
-==========
+Multimodal Inputs
+=================
 
-vLLM provides experimental support for Vision Language Models (VLMs). See the :ref:`list of supported VLMs here `.
-This document shows you how to run and serve these models using vLLM.
+This page teaches you how to pass multi-modal inputs to :ref:`multi-modal models ` in vLLM.
 
 .. note::
-    We are actively iterating on VLM support. See `this RFC `_ for upcoming changes,
+    We are actively iterating on multi-modal support. See `this RFC `_ for upcoming changes,
     and `open an issue on GitHub `_ if you have any feedback or feature requests.
 
 Offline Inference
 -----------------
 
-Single-image input
-^^^^^^^^^^^^^^^^^^
-
-The :class:`~vllm.LLM` class can be instantiated in much the same way as language-only models.
-
-.. code-block:: python
-
-    llm = LLM(model="llava-hf/llava-1.5-7b-hf")
-
-To pass an image to the model, note the following in :class:`vllm.inputs.PromptType`:
+To input multi-modal data, follow this schema in :class:`vllm.inputs.PromptType`:
 
 * ``prompt``: The prompt should follow the format that is documented on HuggingFace.
 * ``multi_modal_data``: This is a dictionary that follows the schema defined in :class:`vllm.multimodal.MultiModalDataDict`.
 
+Image
+^^^^^
+
+You can pass a single image to the :code:`'image'` field of the multi-modal dictionary, as shown in the following examples:
+
 .. code-block:: python
 
+    llm = LLM(model="llava-hf/llava-1.5-7b-hf")
+
     # Refer to the HuggingFace repo for the correct format to use
     prompt = "USER: \nWhat is the content of this image?\nASSISTANT:"
 
@@ -41,41 +38,6 @@ To pass an image to the model, note the following in :class:`vllm.inputs.PromptT
         "multi_modal_data": {"image": image},
     })
 
-    for o in outputs:
-        generated_text = o.outputs[0].text
-        print(generated_text)
-
-    # Inference with image embeddings as input
-    image_embeds = torch.load(...) # torch.Tensor of shape (1, image_feature_size, hidden_size of LM)
-    outputs = llm.generate({
-        "prompt": prompt,
-        "multi_modal_data": {"image": image_embeds},
-    })
-
-    for o in outputs:
-        generated_text = o.outputs[0].text
-        print(generated_text)
-
-    # Inference with image embeddings as input with additional parameters
-    # Specifically, we are conducting a trial run of Qwen2VL and MiniCPM-V with the new input format, which utilizes additional parameters.
-    mm_data = {}
-
-    image_embeds = torch.load(...) # torch.Tensor of shape (num_images, image_feature_size, hidden_size of LM)
-    # For Qwen2VL, image_grid_thw is needed to calculate positional encoding.
-    mm_data['image'] = {
-        "image_embeds": image_embeds,
-        "image_grid_thw": torch.load(...) # torch.Tensor of shape (1, 3),
-    }
-    # For MiniCPM-V, image_size_list is needed to calculate details of the sliced image.
-    mm_data['image'] = {
-        "image_embeds": image_embeds,
-        "image_size_list": [image.size] # list of image sizes
-    }
-    outputs = llm.generate({
-        "prompt": prompt,
-        "multi_modal_data": mm_data,
-    })
-
     for o in outputs:
         generated_text = o.outputs[0].text
         print(generated_text)
@@ -102,12 +64,7 @@ To pass an image to the model, note the following in :class:`vllm.inputs.PromptT
 
 A code example can be found in `examples/offline_inference_vision_language.py `_.
 
-Multi-image input
-^^^^^^^^^^^^^^^^^
-
-Multi-image input is only supported for a subset of VLMs, as shown :ref:`here `.
-
-To enable multiple multi-modal items per text prompt, you have to set ``limit_mm_per_prompt`` for the :class:`~vllm.LLM` class.
+To substitute multiple images inside the same text prompt, you can pass in a list of images instead:
 
 .. code-block:: python
 
@@ -118,10 +75,6 @@ To enable multiple multi-modal items per text prompt, you have to set ``limit_mm
         limit_mm_per_prompt={"image": 2},  # The maximum number to accept
     )
 
-Instead of passing in a single image, you can pass in a list of images.
-
-.. code-block:: python
-
     # Refer to the HuggingFace repo for the correct format to use
     prompt = "<|user|>\n<|image_1|>\n<|image_2|>\nWhat is the content of each image?<|end|>\n<|assistant|>\n"
 
@@ -169,30 +122,114 @@ Multi-image input can be extended to perform video captioning. We show this with
     generated_text = o.outputs[0].text
     print(generated_text)
 
+Video
+^^^^^
+
+You can pass a list of NumPy arrays directly to the :code:`'video'` field of the multi-modal dictionary
+instead of using multi-image input.
+
+Please refer to `examples/offline_inference_vision_language.py `_ for more details.
+
+Audio
+^^^^^
+
+You can pass a tuple :code:`(array, sampling_rate)` to the :code:`'audio'` field of the multi-modal dictionary.
+
+Please refer to `examples/offline_inference_audio_language.py `_ for more details.
+
+Embedding
+^^^^^^^^^
+
+To input pre-computed embeddings belonging to a data type (i.e. image, video, or audio) directly to the language model,
+pass a tensor of shape :code:`(num_items, feature_size, hidden_size of LM)` to the corresponding field of the multi-modal dictionary.
+
+.. code-block:: python
+
+    # Inference with image embeddings as input
+    llm = LLM(model="llava-hf/llava-1.5-7b-hf")
+
+    # Refer to the HuggingFace repo for the correct format to use
+    prompt = "USER: \nWhat is the content of this image?\nASSISTANT:"
+
+    # Embeddings for single image
+    # torch.Tensor of shape (1, image_feature_size, hidden_size of LM)
+    image_embeds = torch.load(...)
+
+    outputs = llm.generate({
+        "prompt": prompt,
+        "multi_modal_data": {"image": image_embeds},
+    })
+
+    for o in outputs:
+        generated_text = o.outputs[0].text
+        print(generated_text)
+
+For Qwen2-VL and MiniCPM-V, we accept additional parameters alongside the embeddings:
+
+.. code-block:: python
+
+    # Construct the prompt based on your model
+    prompt = ...
+
+    # Embeddings for multiple images
+    # torch.Tensor of shape (num_images, image_feature_size, hidden_size of LM)
+    image_embeds = torch.load(...)
+
+    # Qwen2-VL
+    llm = LLM("Qwen/Qwen2-VL-2B-Instruct", limit_mm_per_prompt={"image": 4})
+    mm_data = {
+        "image": {
+            "image_embeds": image_embeds,
+            # image_grid_thw is needed to calculate positional encoding.
+            "image_grid_thw": torch.load(...),  # torch.Tensor of shape (1, 3),
+        }
+    }
+
+    # MiniCPM-V
+    llm = LLM("openbmb/MiniCPM-V-2_6", trust_remote_code=True, limit_mm_per_prompt={"image": 4})
+    mm_data = {
+        "image": {
+            "image_embeds": image_embeds,
+            # image_size_list is needed to calculate details of the sliced image.
+            "image_size_list": [image.size for image in images],  # list of image sizes
+        }
+    }
+
+    outputs = llm.generate({
+        "prompt": prompt,
+        "multi_modal_data": mm_data,
+    })
+
+    for o in outputs:
+        generated_text = o.outputs[0].text
+        print(generated_text)
+
 Online Inference
 ----------------
 
-OpenAI Vision API
-^^^^^^^^^^^^^^^^^
+Our OpenAI-compatible server accepts multi-modal data via the `Chat Completions API `_.
+
+.. important::
+    A chat template is **required** to use the Chat Completions API.
+
+    Although most models come with a chat template, for others you have to define one yourself.
+    The chat template can be inferred based on the documentation on the model's HuggingFace repo.
+    For example, LLaVA-1.5 (``llava-hf/llava-1.5-7b-hf``) requires a chat template that can be found `here `__.
+
+Image
+^^^^^
 
-You can serve vision language models with vLLM's HTTP server that is compatible with `OpenAI Vision API `_.
+Image input is supported according to `OpenAI Vision API `_.
+Here is a simple example using Phi-3.5-Vision.
 
-Below is an example on how to launch the same ``microsoft/Phi-3.5-vision-instruct`` with vLLM's OpenAI-compatible API server.
+First, launch the OpenAI-compatible server:
 
 .. code-block:: bash
 
     vllm serve microsoft/Phi-3.5-vision-instruct --task generate \
      --trust-remote-code --max-model-len 4096 --limit-mm-per-prompt image=2
 
-.. important::
-    Since OpenAI Vision API is based on `Chat Completions API `_,
-    a chat template is **required** to launch the API server.
-
-    Although Phi-3.5-Vision comes with a chat template, for other models you may have to provide one if the model's tokenizer does not come with it.
-    The chat template can be inferred based on the documentation on the model's HuggingFace repo.
-    For example, LLaVA-1.5 (``llava-hf/llava-1.5-7b-hf``) requires a chat template that can be found `here `_.
-
-To consume the server, you can use the OpenAI client like in the example below:
+Then, you can use the OpenAI client as follows:
 
 .. code-block:: python
 
@@ -252,37 +289,160 @@ A full code example can be found in `examples/openai_chat_completion_client_for_
 
 .. note::
 
-    By default, the timeout for fetching images through http url is ``5`` seconds. You can override this by setting the environment variable:
+    By default, the timeout for fetching images through HTTP URL is ``5`` seconds.
+    You can override this by setting the environment variable:
 
     .. code-block:: console
 
        $ export VLLM_IMAGE_FETCH_TIMEOUT=
 
-Chat Embeddings API
-^^^^^^^^^^^^^^^^^^^
+Video
+^^^^^
+
+Instead of :code:`image_url`, you can pass a video file via :code:`video_url`.
+
+You can use `these tests `_ as reference.
+
+.. note::
+
+    By default, the timeout for fetching videos through HTTP URL is ``30`` seconds.
+    You can override this by setting the environment variable:
+
+    .. code-block:: console
+
+        $ export VLLM_VIDEO_FETCH_TIMEOUT=
+
+Audio
+^^^^^
+
+Audio input is supported according to `OpenAI Audio API `_.
+Here is a simple example using Ultravox-v0.3.
+
+First, launch the OpenAI-compatible server:
+
+.. code-block:: bash
+
+    vllm serve fixie-ai/ultravox-v0_3
+
+Then, you can use the OpenAI client as follows:
+
+.. code-block:: python
+
+    import base64
+    import requests
+    from openai import OpenAI
+    from vllm.assets.audio import AudioAsset
+
+    def encode_base64_content_from_url(content_url: str) -> str:
+        """Encode a content retrieved from a remote url to base64 format."""
+
+        with requests.get(content_url) as response:
+            response.raise_for_status()
+            result = base64.b64encode(response.content).decode('utf-8')
+
+        return result
+
+    openai_api_key = "EMPTY"
+    openai_api_base = "http://localhost:8000/v1"
+
+    client = OpenAI(
+        api_key=openai_api_key,
+        base_url=openai_api_base,
+    )
+
+    # The model name that was passed to `vllm serve` above
+    model = "fixie-ai/ultravox-v0_3"
+
+    # Any format supported by librosa is supported
+    audio_url = AudioAsset("winning_call").url
+    audio_base64 = encode_base64_content_from_url(audio_url)
+
+    chat_completion_from_base64 = client.chat.completions.create(
+        messages=[{
+            "role": "user",
+            "content": [
+                {
+                    "type": "text",
+                    "text": "What's in this audio?"
+                },
+                {
+                    "type": "input_audio",
+                    "input_audio": {
+                        "data": audio_base64,
+                        "format": "wav"
+                    },
+                },
+            ],
+        }],
+        model=model,
+        max_completion_tokens=64,
+    )
+
+    result = chat_completion_from_base64.choices[0].message.content
+    print("Chat completion output from input audio:", result)
+
+Alternatively, you can pass :code:`audio_url`, which is the audio counterpart of :code:`image_url` for image input:
+
+.. code-block:: python
+
+    chat_completion_from_url = client.chat.completions.create(
+        messages=[{
+            "role": "user",
+            "content": [
+                {
+                    "type": "text",
+                    "text": "What's in this audio?"
+                },
+                {
+                    "type": "audio_url",
+                    "audio_url": {
+                        "url": audio_url
+                    },
+                },
+            ],
+        }],
+        model=model,
+        max_completion_tokens=64,
+    )
+
+    result = chat_completion_from_url.choices[0].message.content
+    print("Chat completion output from audio url:", result)
 
-vLLM's Chat Embeddings API is a superset of OpenAI's `Embeddings API `_,
-where a list of ``messages`` can be passed instead of batched ``inputs``. This enables multi-modal inputs to be passed to embedding models.
+A full code example can be found in `examples/openai_chat_completion_client_for_multimodal.py `_.
+
+.. note::
+
+    By default, the timeout for fetching audio through HTTP URL is ``10`` seconds.
+    You can override this by setting the environment variable:
+
+    .. code-block:: console
+
+        $ export VLLM_AUDIO_FETCH_TIMEOUT=
+
+Embedding
+^^^^^^^^^
+
+vLLM's Embeddings API is a superset of OpenAI's `Embeddings API `_,
+where a list of chat ``messages`` can be passed instead of batched ``inputs``. This enables multi-modal inputs to be passed to embedding models.
 
 .. tip::
     The schema of ``messages`` is exactly the same as in Chat Completions API.
+    You can refer to the above tutorials for more details on how to pass each type of multi-modal data.
+
+Usually, embedding models do not expect chat-based input, so we need to use a custom chat template to format the text and images.
+Refer to the examples below for illustration.
 
-In this example, we will serve the ``TIGER-Lab/VLM2Vec-Full`` model.
+Here is an end-to-end example using VLM2Vec. To serve the model:
 
 .. code-block:: bash
 
-    vllm serve TIGER-Lab/VLM2Vec-Full --task embedding \
+    vllm serve TIGER-Lab/VLM2Vec-Full --task embed \
      --trust-remote-code --max-model-len 4096 --chat-template examples/template_vlm2vec.jinja
 
 .. important::
 
-    Since VLM2Vec has the same model architecture as Phi-3.5-Vision, we have to explicitly pass ``--task embedding``
+    Since VLM2Vec has the same model architecture as Phi-3.5-Vision, we have to explicitly pass ``--task embed``
     to run this model in embedding mode instead of text generation mode.
 
-.. important::
-
-    VLM2Vec does not expect chat-based input. We use a `custom chat template `_
-    to combine the text and images together.
+    The custom chat template is completely different from the original one for this model,
+    and can be found `here `__.
 
 Since the request schema is not defined by OpenAI client, we post a request to the server using the lower-level ``requests`` library:
 
@@ -310,17 +470,19 @@ Since the request schema is not defined by OpenAI client, we post a request to t
     response_json = response.json()
    print("Embedding output:", response_json["data"][0]["embedding"])
 
-Here is an example for serving the ``MrLight/dse-qwen2-2b-mrl-v1`` model.
+Below is another example, this time using the ``MrLight/dse-qwen2-2b-mrl-v1`` model.
 
 .. code-block:: bash
 
-    vllm serve MrLight/dse-qwen2-2b-mrl-v1 --task embedding \
+    vllm serve MrLight/dse-qwen2-2b-mrl-v1 --task embed \
      --trust-remote-code --max-model-len 8192 --chat-template examples/template_dse_qwen2_vl.jinja
 
 .. important::
 
-    Like with VLM2Vec, we have to explicitly pass ``--task embedding``. Additionally, ``MrLight/dse-qwen2-2b-mrl-v1`` requires an EOS token for embeddings,
-    which is handled by the jinja template.
+    Like with VLM2Vec, we have to explicitly pass ``--task embed``.
+
+    Additionally, ``MrLight/dse-qwen2-2b-mrl-v1`` requires an EOS token for embeddings, which is handled
+    by `this custom chat template `__.
 
 .. important::
 
diff --git a/docs/source/models/performance.rst b/docs/source/usage/performance.rst
similarity index 100%
rename from docs/source/models/performance.rst
rename to docs/source/usage/performance.rst
diff --git a/docs/source/models/spec_decode.rst b/docs/source/usage/spec_decode.rst
similarity index 97%
rename from docs/source/models/spec_decode.rst
rename to docs/source/usage/spec_decode.rst
index d57ffec53215d..f1f1917f974bb 100644
--- a/docs/source/models/spec_decode.rst
+++ b/docs/source/usage/spec_decode.rst
@@ -1,13 +1,16 @@
 .. _spec_decode:
 
-Speculative decoding in vLLM
-============================
+Speculative decoding
+====================
 
 .. warning::
    Please note that speculative decoding in vLLM is not yet optimized and does not usually yield inter-token latency reductions for all prompt datasets or sampling parameters.
    The work to optimize it is ongoing and can be followed in `this issue. `_
 
+.. warning::
+   Currently, speculative decoding in vLLM is not compatible with pipeline parallelism.
+
 This document shows how to use `Speculative Decoding `_ with vLLM.
 Speculative decoding is a technique which improves inter-token latency in memory-bound LLM inference.
 
@@ -182,7 +185,7 @@ speculative decoding, breaking down the guarantees into three key areas:
 
 3. **vLLM Logprob Stability**
    - vLLM does not currently guarantee stable token log probabilities (logprobs). This can result in different
     outputs for the same request across runs. For more details, see the FAQ section
-     titled *Can the output of a prompt vary across runs in vLLM?* in the `FAQs <../serving/faq>`_.
+     titled *Can the output of a prompt vary across runs in vLLM?* in the :ref:`FAQs `.
 **Conclusion**
 
@@ -197,7 +200,7 @@ can occur due to following factors:
 
 **Mitigation Strategies**
 
-For mitigation strategies, please refer to the FAQ entry *Can the output of a prompt vary across runs in vLLM?* in the `FAQs <../serving/faq>`_.
+For mitigation strategies, please refer to the FAQ entry *Can the output of a prompt vary across runs in vLLM?* in the :ref:`FAQs `.
 
 Resources for vLLM contributors
 -------------------------------
diff --git a/docs/source/models/structured_outputs.rst b/docs/source/usage/structured_outputs.rst
similarity index 100%
rename from docs/source/models/structured_outputs.rst
rename to docs/source/usage/structured_outputs.rst
diff --git a/docs/source/usage/tool_calling.md b/docs/source/usage/tool_calling.md
new file mode 100644
index 0000000000000..f8be023307b0c
--- /dev/null
+++ b/docs/source/usage/tool_calling.md
@@ -0,0 +1,287 @@
+# Tool Calling
+
+vLLM currently supports named function calling, as well as the `auto` and `none` options for the `tool_choice` field in the chat completion API. The `tool_choice` option `required` is **not yet supported** but is on the roadmap.
+
+## Quickstart
+
+Start the server with tool calling enabled. This example uses Meta's Llama 3.1 8B model, so we need to use the llama3 tool calling chat template from the vLLM examples directory:
+
+```bash
+vllm serve meta-llama/Llama-3.1-8B-Instruct \
+    --enable-auto-tool-choice \
+    --tool-call-parser llama3_json \
+    --chat-template examples/tool_chat_template_llama3_json.jinja
+```
+
+Next, make a request to the model that should result in it using the available tools:
+
+```python
+from openai import OpenAI
+import json
+
+client = OpenAI(base_url="http://localhost:8000/v1", api_key="dummy")
+
+def get_weather(location: str, unit: str):
+    return f"Getting the weather for {location} in {unit}..."
+
+tool_functions = {"get_weather": get_weather}
+
+tools = [{
+    "type": "function",
+    "function": {
+        "name": "get_weather",
+        "description": "Get the current weather in a given location",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "location": {"type": "string", "description": "City and state, e.g., 'San Francisco, CA'"},
+                "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}
+            },
+            "required": ["location", "unit"]
+        }
+    }
+}]
+
+response = client.chat.completions.create(
+    model=client.models.list().data[0].id,
+    messages=[{"role": "user", "content": "What's the weather like in San Francisco?"}],
+    tools=tools,
+    tool_choice="auto"
+)
+
+tool_call = response.choices[0].message.tool_calls[0].function
+print(f"Function called: {tool_call.name}")
+print(f"Arguments: {tool_call.arguments}")
+print(f"Result: {get_weather(**json.loads(tool_call.arguments))}")
+```
+
+Example output:
+```
+Function called: get_weather
+Arguments: {"location": "San Francisco, CA", "unit": "fahrenheit"}
+Result: Getting the weather for San Francisco, CA in fahrenheit...
+```
+
+This example demonstrates:
+- Setting up the server with tool calling enabled
+- Defining an actual function to handle tool calls
+- Making a request with `tool_choice="auto"`
+- Handling the structured response and executing the corresponding function
+
+You can also specify a particular function using named function calling by setting `tool_choice={"type": "function", "function": {"name": "get_weather"}}`, as shown in the sketch below.
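+
+For instance, reusing the `client` and `tools` objects from the quickstart above, a named-function request could look like the following (a minimal sketch; the prompt text is illustrative):
+
+```python
+# Force the model to call get_weather instead of letting it choose a tool
+response = client.chat.completions.create(
+    model=client.models.list().data[0].id,
+    messages=[{"role": "user", "content": "How warm is Seattle right now, in celsius?"}],
+    tools=tools,
+    tool_choice={"type": "function", "function": {"name": "get_weather"}},
+)
+
+# The response is guaranteed to contain a parsable call to the named function
+print(response.choices[0].message.tool_calls[0].function.arguments)
+```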
+Note that this uses the guided decoding backend, so the first time it is used there will be several seconds of latency (or more) while the FSM is compiled, before it is cached for subsequent requests.
+
+Remember that it's the caller's responsibility to:
+1. Define appropriate tools in the request
+2. Include relevant context in the chat messages
+3. Handle the tool calls in your application logic
+
+For more advanced usage, including parallel tool calls and different model-specific parsers, see the sections below.
+
+## Named Function Calling
+vLLM supports named function calling in the chat completion API. It does so via guided decoding with Outlines, so this is
+enabled by default and will work with any supported model. You are guaranteed a validly-parsable function call - not a
+high-quality one.
+
+vLLM will use guided decoding to ensure the response matches the tool parameter object defined by the JSON schema in the `tools` parameter.
+For best results, we recommend specifying the expected output format / schema in the prompt, so that the model's intended generation is aligned with the schema that the guided decoding backend forces it to generate.
+
+To use a named function, you need to define the functions in the `tools` parameter of the chat completion request, and
+specify the `name` of one of the tools in the `tool_choice` parameter of the chat completion request.
+
+
+## Automatic Function Calling
+
+To enable this feature, you should set the following flags:
+* `--enable-auto-tool-choice` -- **mandatory** for auto tool choice; tells vLLM that you want to enable the model to generate its own tool calls when it
+deems appropriate.
+* `--tool-call-parser` -- select the tool parser to use (listed below). Additional tool parsers
+will continue to be added in the future, and you can also register your own tool parsers via `--tool-parser-plugin`.
+* `--tool-parser-plugin` -- **optional** tool parser plugin used to register user-defined tool parsers into vLLM; the registered tool parser name can then be specified in `--tool-call-parser`.
+* `--chat-template` -- **optional** for auto tool choice. The path to the chat template which handles `tool`-role messages and `assistant`-role messages
+that contain previously generated tool calls. Hermes, Mistral and Llama models have tool-compatible chat templates in their
+`tokenizer_config.json` files, but you can specify a custom template. This argument can be set to `tool_use` if your model has a tool use-specific chat
+template configured in the `tokenizer_config.json`. In this case, it will be used per the `transformers` specification. More on this [here](https://huggingface.co/docs/transformers/en/chat_templating#why-do-some-models-have-multiple-templates)
+from HuggingFace, and you can find an example of this in a `tokenizer_config.json` [here](https://huggingface.co/NousResearch/Hermes-2-Pro-Llama-3-8B/blob/main/tokenizer_config.json).
+
+If your favorite tool-calling model is not supported, please feel free to contribute a parser & tool use chat template!
+
+
+### Hermes Models (`hermes`)
+
+All Nous Research Hermes-series models newer than Hermes 2 Pro should be supported.
+* `NousResearch/Hermes-2-Pro-*`
+* `NousResearch/Hermes-2-Theta-*`
+* `NousResearch/Hermes-3-*`
+
+
+_Note that the Hermes 2 **Theta** models are known to have degraded tool call quality & capabilities due to the merge
+step in their creation_.
+
+Flags: `--tool-call-parser hermes`
+
+
+### Mistral Models (`mistral`)
+
+Supported models:
+* `mistralai/Mistral-7B-Instruct-v0.3` (confirmed)
+* Additional mistral function-calling models are compatible as well.
+
+Known issues:
+1. Mistral 7B struggles to generate parallel tool calls correctly.
+2. Mistral's `tokenizer_config.json` chat template requires tool call IDs that are exactly 9 digits, which is
+much shorter than what vLLM generates. Since an exception is thrown when this condition
+is not met, the following additional chat templates are provided:
+
+* `examples/tool_chat_template_mistral.jinja` - this is the "official" Mistral chat template, but tweaked so that
+it works with vLLM's tool call IDs (provided `tool_call_id` fields are truncated to the last 9 digits)
+* `examples/tool_chat_template_mistral_parallel.jinja` - this is a "better" version that adds a tool-use system prompt
+when tools are provided, which results in much better reliability when working with parallel tool calling.
+
+
+Recommended flags: `--tool-call-parser mistral --chat-template examples/tool_chat_template_mistral_parallel.jinja`
+
+
+### Llama Models (`llama3_json`)
+
+Supported models:
+* `meta-llama/Meta-Llama-3.1-8B-Instruct`
+* `meta-llama/Meta-Llama-3.1-70B-Instruct`
+* `meta-llama/Meta-Llama-3.1-405B-Instruct`
+* `meta-llama/Meta-Llama-3.1-405B-Instruct-FP8`
+
+The supported tool calling format is the [JSON-based tool calling](https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1/#json-based-tool-calling). For [pythonic tool calling](https://github.com/meta-llama/llama-models/blob/main/models/llama3_2/text_prompt_format.md#zero-shot-function-calling) in Llama-3.2 models, see the `pythonic` tool parser below.
+Other tool calling formats, like the built-in Python tool calling or custom tool calling, are not supported.
+
+Known issues:
+1. Parallel tool calls are not supported.
+2. The model can generate parameters with a wrong format, such as generating
+   an array serialized as string instead of an array.
+
+The `tool_chat_template_llama3_json.jinja` file contains the "official" Llama chat template, but tweaked so that
+it works better with vLLM.
+
+Recommended flags: `--tool-call-parser llama3_json --chat-template examples/tool_chat_template_llama3_json.jinja`
+
+#### IBM Granite
+
+Supported models:
+* `ibm-granite/granite-3.0-8b-instruct`
+
+Recommended flags: `--tool-call-parser granite --chat-template examples/tool_chat_template_granite.jinja`
+
+`examples/tool_chat_template_granite.jinja`: this is a modified chat template from the original on Huggingface. Parallel function calls are supported.
+
+* `ibm-granite/granite-20b-functioncalling`
+
+Recommended flags: `--tool-call-parser granite-20b-fc --chat-template examples/tool_chat_template_granite_20b_fc.jinja`
+
+`examples/tool_chat_template_granite_20b_fc.jinja`: this is a modified chat template from the original on Huggingface, which is not vLLM-compatible. It blends function description elements from the Hermes template and follows the same system prompt as "Response Generation" mode from [the paper](https://arxiv.org/abs/2407.00121). Parallel function calls are supported.
+
+
+### InternLM Models (`internlm`)
+
+Supported models:
+* `internlm/internlm2_5-7b-chat` (confirmed)
+* Additional internlm2.5 function-calling models are compatible as well.
+
+Known issues:
+* Although this implementation also supports InternLM2, the tool call results are not stable when testing with the `internlm/internlm2-chat-7b` model.
+
+Recommended flags: `--tool-call-parser internlm --chat-template examples/tool_chat_template_internlm2_tool.jinja`
+
+
+### Jamba Models (`jamba`)
+AI21's Jamba-1.5 models are supported.
+* `ai21labs/AI21-Jamba-1.5-Mini`
+* `ai21labs/AI21-Jamba-1.5-Large`
+
+
+Flags: `--tool-call-parser jamba`
+
+
+### Models with Pythonic Tool Calls (`pythonic`)
+
+A growing number of models output a Python list to represent tool calls instead of using JSON. This has the advantage of inherently supporting parallel tool calls and removing ambiguity around the JSON schema required for tool calls. The `pythonic` tool parser can support such models.
+
+As a concrete example, these models may look up the weather in San Francisco and Seattle by generating:
+```python
+[get_weather(city='San Francisco', metric='celsius'), get_weather(city='Seattle', metric='celsius')]
+```
+
+Limitations:
+* The model must not generate both text and tool calls in the same generation. This may not be hard to change for a specific model, but the community currently lacks consensus on which tokens to emit when starting and ending tool calls. (In particular, the Llama 3.2 models emit no such tokens.)
+* Llama's smaller models struggle to use tools effectively.
+
+Example supported models:
+* `meta-llama/Llama-3.2-1B-Instruct`\* (use with `examples/tool_chat_template_llama3.2_pythonic.jinja`)
+* `meta-llama/Llama-3.2-3B-Instruct`\* (use with `examples/tool_chat_template_llama3.2_pythonic.jinja`)
+* `Team-ACE/ToolACE-8B` (use with `examples/tool_chat_template_toolace.jinja`)
+* `fixie-ai/ultravox-v0_4-ToolACE-8B` (use with `examples/tool_chat_template_toolace.jinja`)
+
+Flags: `--tool-call-parser pythonic --chat-template {see_above}`
+
+---
+**WARNING**
+Llama's smaller models frequently fail to emit tool calls in the correct format. Your mileage may vary.
+
+---
+
+
+## How to write a tool parser plugin
+
+A tool parser plugin is a Python file containing one or more ToolParser implementations. You can write a ToolParser similar to the `Hermes2ProToolParser` in vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py.
+
+Here is a summary of a plugin file:
+
+```python
+
+# import the required packages (typical locations shown; adjust to your vLLM version)
+from typing import Sequence, Union
+
+from vllm.entrypoints.openai.protocol import (ChatCompletionRequest,
+                                              DeltaMessage,
+                                              ExtractedToolCallInformation)
+from vllm.entrypoints.openai.tool_parsers import (ToolParser,
+                                                  ToolParserManager)
+from vllm.transformers_utils.tokenizer import AnyTokenizer
+
+# define a tool parser and register it to vllm
+# the name list in register_module can be used
+# in --tool-call-parser. you can define as many
+# tool parsers as you want here.
+@ToolParserManager.register_module(["example"])
+class ExampleToolParser(ToolParser):
+    def __init__(self, tokenizer: AnyTokenizer):
+        super().__init__(tokenizer)
+
+    # adjust request. e.g.: set skip special tokens
+    # to False for tool call output.
+    def adjust_request(
+            self, request: ChatCompletionRequest) -> ChatCompletionRequest:
+        return request
+
+    # implement the tool call parse for stream call
+    def extract_tool_calls_streaming(
+        self,
+        previous_text: str,
+        current_text: str,
+        delta_text: str,
+        previous_token_ids: Sequence[int],
+        current_token_ids: Sequence[int],
+        delta_token_ids: Sequence[int],
+        request: ChatCompletionRequest,
+    ) -> Union[DeltaMessage, None]:
+        # parse delta_text and return a DeltaMessage here
+        # (or None if this delta does not complete a tool call);
+        # placeholder below
+        return None
+
+    # implement the tool parse for non-stream call
+    def extract_tool_calls(
+        self,
+        model_output: str,
+        request: ChatCompletionRequest,
+    ) -> ExtractedToolCallInformation:
+        # parse model_output and return the extracted tool calls;
+        # this placeholder reports that no tools were called
+        return ExtractedToolCallInformation(tools_called=False,
+                                            tool_calls=[],
+                                            content=model_output)
+
+
+```
+
+Then you can use this plugin in the command line like this.
+```
+    --enable-auto-tool-choice \
+    --tool-parser-plugin <absolute path of the plugin file> \
+    --tool-call-parser example \
+    --chat-template <your chat template> \
+```
+
diff --git a/docs/source/serving/usage_stats.md b/docs/source/usage/usage_stats.md
similarity index 100%
rename from docs/source/serving/usage_stats.md
rename to docs/source/usage/usage_stats.md
diff --git a/examples/chart-helm/.helmignore b/examples/chart-helm/.helmignore
new file mode 100644
index 0000000000000..2d1303b784cb8
--- /dev/null
+++ b/examples/chart-helm/.helmignore
@@ -0,0 +1,6 @@
+*.png
+.git/
+ct.yaml
+lintconf.yaml
+values.schema.json
+/workflows
\ No newline at end of file
diff --git a/examples/chart-helm/Chart.yaml b/examples/chart-helm/Chart.yaml
new file mode 100644
index 0000000000000..fb0f06f6d2701
--- /dev/null
+++ b/examples/chart-helm/Chart.yaml
@@ -0,0 +1,21 @@
+apiVersion: v2
+name: chart-vllm
+description: Chart vllm
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.0.1
+
+maintainers:
+  - name: mfournioux
diff --git a/examples/chart-helm/ct.yaml b/examples/chart-helm/ct.yaml
new file mode 100644
index 0000000000000..d273e118203ad
--- /dev/null
+++ b/examples/chart-helm/ct.yaml
@@ -0,0 +1,3 @@
+chart-dirs:
+  - charts
+validate-maintainers: false
\ No newline at end of file
diff --git a/examples/chart-helm/lintconf.yaml b/examples/chart-helm/lintconf.yaml
new file mode 100644
index 0000000000000..c8e8c5d7d9767
--- /dev/null
+++ b/examples/chart-helm/lintconf.yaml
@@ -0,0 +1,42 @@
+---
+rules:
+  braces:
+    min-spaces-inside: 0
+    max-spaces-inside: 0
+    min-spaces-inside-empty: -1
+    max-spaces-inside-empty: -1
+  brackets:
+    min-spaces-inside: 0
+    max-spaces-inside: 0
+    min-spaces-inside-empty: -1
+    max-spaces-inside-empty: -1
+  colons:
+    max-spaces-before: 0
+    max-spaces-after: 1
+  commas:
+    max-spaces-before: 0
+    min-spaces-after: 1
+    max-spaces-after: 1
+  comments:
+    require-starting-space: true
+    min-spaces-from-content: 2
+  document-end: disable
+  document-start: disable  # No --- to start a file
+  empty-lines:
+    max: 2
+    max-start: 0
+    max-end: 0
+  hyphens:
+    max-spaces-after: 1
+  indentation:
+    spaces: consistent
+    indent-sequences: whatever  # - list indentation will handle both indentation and without
+    check-multi-line-strings: false
+  key-duplicates: enable
+  line-length: disable  # Lines can be any length
+  new-line-at-end-of-file: disable
+  new-lines:
+    type: unix
+  trailing-spaces: enable
+  truthy:
+    level: warning
\ No newline at end of file
diff --git a/examples/chart-helm/templates/_helpers.tpl b/examples/chart-helm/templates/_helpers.tpl
new file mode 100644
index 0000000000000..a9690bad3c945
--- /dev/null
+++ b/examples/chart-helm/templates/_helpers.tpl
@@ -0,0 +1,164 @@
+{{/*
+Define ports for the pods
+*/}}
+{{- define "chart.container-port" -}}
+{{- default "8000" .Values.containerPort }}
+{{- end }}
+
+{{/*
+Define service name
+*/}}
+{{- define "chart.service-name" -}}
"chart.service-name" -}} +{{- if .Values.serviceName }} +{{- .Values.serviceName | lower | trim }} +{{- else }} +"{{ .Release.Name }}-service" +{{- end }} +{{- end }} + +{{/* +Define service port +*/}} +{{- define "chart.service-port" -}} +{{- if .Values.servicePort }} +{{- .Values.servicePort }} +{{- else }} +{{- include "chart.container-port" . }} +{{- end }} +{{- end }} + +{{/* +Define service port name +*/}} +{{- define "chart.service-port-name" -}} +"service-port" +{{- end }} + +{{/* +Define container port name +*/}} +{{- define "chart.container-port-name" -}} +"container-port" +{{- end }} + +{{/* +Define deployment strategy +*/}} +{{- define "chart.strategy" -}} +strategy: +{{- if not .Values.deploymentStrategy }} + rollingUpdate: + maxSurge: 100% + maxUnavailable: 0 +{{- else }} +{{ toYaml .Values.deploymentStrategy | indent 2 }} +{{- end }} +{{- end }} + +{{/* +Define additional ports +*/}} +{{- define "chart.extraPorts" }} +{{- with .Values.extraPorts }} +{{ toYaml . }} +{{- end }} +{{- end }} + +{{/* +Define chart external ConfigMaps and Secrets +*/}} +{{- define "chart.externalConfigs" -}} +{{- with .Values.externalConfigs -}} +{{ toYaml . }} +{{- end }} +{{- end }} + + +{{/* +Define liveness et readiness probes +*/}} +{{- define "chart.probes" -}} +{{- if .Values.readinessProbe }} +readinessProbe: +{{- with .Values.readinessProbe }} +{{- toYaml . | nindent 2 }} +{{- end }} +{{- end }} +{{- if .Values.livenessProbe }} +livenessProbe: +{{- with .Values.livenessProbe }} +{{- toYaml . | nindent 2 }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Define resources +*/}} +{{- define "chart.resources" -}} +requests: + memory: {{ required "Value 'resources.requests.memory' must be defined !" .Values.resources.requests.memory | quote }} + cpu: {{ required "Value 'resources.requests.cpu' must be defined !" .Values.resources.requests.cpu | quote }} + {{- if and (gt (int (index .Values.resources.requests "nvidia.com/gpu")) 0) (gt (int (index .Values.resources.limits "nvidia.com/gpu")) 0) }} + nvidia.com/gpu: {{ required "Value 'resources.requests.nvidia.com/gpu' must be defined !" (index .Values.resources.requests "nvidia.com/gpu") | quote }} + {{- end }} +limits: + memory: {{ required "Value 'resources.limits.memory' must be defined !" .Values.resources.limits.memory | quote }} + cpu: {{ required "Value 'resources.limits.cpu' must be defined !" .Values.resources.limits.cpu | quote }} + {{- if and (gt (int (index .Values.resources.requests "nvidia.com/gpu")) 0) (gt (int (index .Values.resources.limits "nvidia.com/gpu")) 0) }} + nvidia.com/gpu: {{ required "Value 'resources.limits.nvidia.com/gpu' must be defined !" (index .Values.resources.limits "nvidia.com/gpu") | quote }} + {{- end }} +{{- end }} + + +{{/* +Define User used for the main container +*/}} +{{- define "chart.user" }} +{{- if .Values.image.runAsUser }} +runAsUser: +{{- with .Values.runAsUser }} +{{- toYaml . 
| nindent 2 }} +{{- end }} +{{- end }} +{{- end }} + +{{- define "chart.extraInitImage" -}} +"amazon/aws-cli:2.6.4" +{{- end }} + +{{- define "chart.extraInitEnv" -}} +- name: S3_ENDPOINT_URL + valueFrom: + secretKeyRef: + name: {{ .Release.Name }}-secrets + key: s3endpoint +- name: S3_BUCKET_NAME + valueFrom: + secretKeyRef: + name: {{ .Release.Name }}-secrets + key: s3bucketname +- name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: {{ .Release.Name }}-secrets + key: s3accesskeyid +- name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ .Release.Name }}-secrets + key: s3accesskey +- name: S3_PATH + value: "{{ .Values.extraInit.s3modelpath }}" +- name: AWS_EC2_METADATA_DISABLED + value: "{{ .Values.extraInit.awsEc2MetadataDisabled }}" +{{- end }} + +{{/* + Define chart labels +*/}} +{{- define "chart.labels" -}} +{{- with .Values.labels -}} +{{ toYaml . }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/examples/chart-helm/templates/configmap.yaml b/examples/chart-helm/templates/configmap.yaml new file mode 100644 index 0000000000000..cc5d03782f878 --- /dev/null +++ b/examples/chart-helm/templates/configmap.yaml @@ -0,0 +1,11 @@ +{{- if .Values.configs -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: "{{ .Release.Name }}-configs" + namespace: {{ .Release.Namespace }} +data: + {{- with .Values.configs }} + {{- toYaml . | nindent 2 }} + {{- end }} +{{- end -}} \ No newline at end of file diff --git a/examples/chart-helm/templates/custom-objects.yaml b/examples/chart-helm/templates/custom-objects.yaml new file mode 100644 index 0000000000000..8a65ffd0e552d --- /dev/null +++ b/examples/chart-helm/templates/custom-objects.yaml @@ -0,0 +1,6 @@ +{{- if .Values.customObjects }} +{{- range .Values.customObjects }} +{{- tpl (. | toYaml) $ }} +--- +{{- end }} +{{- end }} \ No newline at end of file diff --git a/examples/chart-helm/templates/deployment.yaml b/examples/chart-helm/templates/deployment.yaml new file mode 100644 index 0000000000000..536983b587be2 --- /dev/null +++ b/examples/chart-helm/templates/deployment.yaml @@ -0,0 +1,122 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ .Release.Name }}-deployment-vllm" + namespace: {{ .Release.Namespace }} + labels: + {{- include "chart.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + {{- include "chart.strategy" . | nindent 2 }} + selector: + matchLabels: + environment: "test" + release: "test" + progressDeadlineSeconds: 1200 + template: + metadata: + labels: + environment: "test" + release: "test" + spec: + containers: + - name: "vllm" + image: "{{ required "Required value 'image.repository' must be defined !" .Values.image.repository }}:{{ required "Required value 'image.tag' must be defined !" .Values.image.tag }}" + {{- if .Values.image.command }} + command : + {{- with .Values.image.command }} + {{- toYaml . | nindent 10 }} + {{- end }} + {{- end }} + securityContext: + {{- if .Values.image.securityContext }} + {{- with .Values.image.securityContext }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- else }} + runAsNonRoot: false + {{- include "chart.user" . | indent 12 }} + {{- end }} + imagePullPolicy: IfNotPresent + {{- if .Values.image.env }} + env : + {{- with .Values.image.env }} + {{- toYaml . 
| nindent 10 }} + {{- end }} + {{- else }} + env: [] + {{- end }} + {{- if or .Values.externalConfigs .Values.configs .Values.secrets }} + envFrom: + {{- if .Values.configs }} + - configMapRef: + name: "{{ .Release.Name }}-configs" + {{- end }} + {{- if .Values.secrets}} + - secretRef: + name: "{{ .Release.Name }}-secrets" + {{- end }} + {{- include "chart.externalConfigs" . | nindent 12 }} + {{- end }} + ports: + - name: {{ include "chart.container-port-name" . }} + containerPort: {{ include "chart.container-port" . }} + {{- include "chart.extraPorts" . | nindent 12 }} + {{- include "chart.probes" . | indent 10 }} + resources: {{- include "chart.resources" . | nindent 12 }} + volumeMounts: + - name: {{ .Release.Name }}-storage + mountPath: /data + + {{- with .Values.extraContainers }} + {{ toYaml . | nindent 8 }} + {{- end }} + + {{- if .Values.extraInit }} + initContainers: + - name: wait-download-model + image: {{ include "chart.extraInitImage" . }} + command: + - /bin/bash + args: + - -eucx + - while aws --endpoint-url $S3_ENDPOINT_URL s3 sync --dryrun s3://$S3_BUCKET_NAME/$S3_PATH /data | grep -q download; do sleep 10; done + env: {{- include "chart.extraInitEnv" . | nindent 10 }} + resources: + requests: + cpu: 200m + memory: 1Gi + limits: + cpu: 500m + memory: 2Gi + volumeMounts: + - name: {{ .Release.Name }}-storage + mountPath: /data + {{- end }} + volumes: + - name: {{ .Release.Name }}-storage + persistentVolumeClaim: + claimName: {{ .Release.Name }}-storage-claim + + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if and (gt (int (index .Values.resources.requests "nvidia.com/gpu")) 0) (gt (int (index .Values.resources.limits "nvidia.com/gpu")) 0) }} + runtimeClassName: nvidia + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: nvidia.com/gpu.product + operator: In + {{- with .Values.gpuModels }} + values: + {{- toYaml . 
| nindent 20 }} + {{- end }} + {{- end }} \ No newline at end of file diff --git a/examples/chart-helm/templates/hpa.yaml b/examples/chart-helm/templates/hpa.yaml new file mode 100644 index 0000000000000..5ca94c8213541 --- /dev/null +++ b/examples/chart-helm/templates/hpa.yaml @@ -0,0 +1,31 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: "{{ .Release.Name }}-hpa" + namespace: {{ .Release.Namespace }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: vllm + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/examples/chart-helm/templates/job.yaml b/examples/chart-helm/templates/job.yaml new file mode 100644 index 0000000000000..f9ea3541e78d2 --- /dev/null +++ b/examples/chart-helm/templates/job.yaml @@ -0,0 +1,37 @@ +{{- if .Values.extraInit }} +apiVersion: batch/v1 +kind: Job +metadata: + name: "{{ .Release.Name }}-init-vllm" + namespace: {{ .Release.Namespace }} +spec: + ttlSecondsAfterFinished: 100 + template: + metadata: + name: init-vllm + spec: + containers: + - name: job-download-model + image: {{ include "chart.extraInitImage" . }} + command: + - /bin/bash + args: + - -eucx + - aws --endpoint-url $S3_ENDPOINT_URL s3 sync s3://$S3_BUCKET_NAME/$S3_PATH /data + env: {{- include "chart.extraInitEnv" . 
| nindent 8 }} + volumeMounts: + - name: {{ .Release.Name }}-storage + mountPath: /data + resources: + requests: + cpu: 200m + memory: 1Gi + limits: + cpu: 500m + memory: 2Gi + restartPolicy: OnFailure + volumes: + - name: {{ .Release.Name }}-storage + persistentVolumeClaim: + claimName: "{{ .Release.Name }}-storage-claim" +{{- end }} \ No newline at end of file diff --git a/examples/chart-helm/templates/poddisruptionbudget.yaml b/examples/chart-helm/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000000000..512bac727da87 --- /dev/null +++ b/examples/chart-helm/templates/poddisruptionbudget.yaml @@ -0,0 +1,7 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: "{{ .Release.Name }}-pdb" + namespace: {{ .Release.Namespace }} +spec: + maxUnavailable: {{ default 1 .Values.maxUnavailablePodDisruptionBudget }} \ No newline at end of file diff --git a/examples/chart-helm/templates/pvc.yaml b/examples/chart-helm/templates/pvc.yaml new file mode 100644 index 0000000000000..e8d203a7a5ace --- /dev/null +++ b/examples/chart-helm/templates/pvc.yaml @@ -0,0 +1,13 @@ +{{- if .Values.extraInit }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: "{{ .Release.Name }}-storage-claim" + namespace: {{ .Release.Namespace }} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ .Values.extraInit.pvcStorage }} +{{- end }} \ No newline at end of file diff --git a/examples/chart-helm/templates/secrets.yaml b/examples/chart-helm/templates/secrets.yaml new file mode 100644 index 0000000000000..4e88e747b616a --- /dev/null +++ b/examples/chart-helm/templates/secrets.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Secret +metadata: + name: "{{ .Release.Name }}-secrets" + namespace: {{ .Release.Namespace }} +type: Opaque +data: + {{- range $key, $val := .Values.secrets }} + {{ $key }}: {{ $val | b64enc | quote }} + {{- end }} \ No newline at end of file diff --git a/examples/chart-helm/templates/service.yaml b/examples/chart-helm/templates/service.yaml new file mode 100644 index 0000000000000..12d0f68b03a35 --- /dev/null +++ b/examples/chart-helm/templates/service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: "{{ .Release.Name }}-service" + namespace: {{ .Release.Namespace }} +spec: + type: ClusterIP + ports: + - name: {{ include "chart.service-port-name" . }} + port: {{ include "chart.service-port" . }} + targetPort: {{ include "chart.container-port-name" . }} + protocol: TCP + selector: + {{- include "chart.labels" . 
| nindent 4 }} \ No newline at end of file diff --git a/examples/chart-helm/values.schema.json b/examples/chart-helm/values.schema.json new file mode 100644 index 0000000000000..812d54bde1397 --- /dev/null +++ b/examples/chart-helm/values.schema.json @@ -0,0 +1,265 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "image": { + "type": "object", + "properties": { + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + }, + "command": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "required": [ + "command", + "repository", + "tag" + ] + }, + "containerPort": { + "type": "integer" + }, + "serviceName": { + "type": "null" + }, + "servicePort": { + "type": "integer" + }, + "extraPorts": { + "type": "array" + }, + "replicaCount": { + "type": "integer" + }, + "deploymentStrategy": { + "type": "object" + }, + "resources": { + "type": "object", + "properties": { + "requests": { + "type": "object", + "properties": { + "cpu": { + "type": "integer" + }, + "memory": { + "type": "string" + }, + "nvidia.com/gpu": { + "type": "integer" + } + }, + "required": [ + "cpu", + "memory", + "nvidia.com/gpu" + ] + }, + "limits": { + "type": "object", + "properties": { + "cpu": { + "type": "integer" + }, + "memory": { + "type": "string" + }, + "nvidia.com/gpu": { + "type": "integer" + } + }, + "required": [ + "cpu", + "memory", + "nvidia.com/gpu" + ] + } + }, + "required": [ + "limits", + "requests" + ] + }, + "gpuModels": { + "type": "array", + "items": { + "type": "string" + } + }, + "autoscaling": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "minReplicas": { + "type": "integer" + }, + "maxReplicas": { + "type": "integer" + }, + "targetCPUUtilizationPercentage": { + "type": "integer" + } + }, + "required": [ + "enabled", + "maxReplicas", + "minReplicas", + "targetCPUUtilizationPercentage" + ] + }, + "configs": { + "type": "object" + }, + "secrets": { + "type": "object" + }, + "externalConfigs": { + "type": "array" + }, + "customObjects": { + "type": "array" + }, + "maxUnavailablePodDisruptionBudget": { + "type": "string" + }, + "extraInit": { + "type": "object", + "properties": { + "s3modelpath": { + "type": "string" + }, + "pvcStorage": { + "type": "string" + }, + "awsEc2MetadataDisabled": { + "type": "boolean" + } + }, + "required": [ + "pvcStorage", + "s3modelpath", + "awsEc2MetadataDisabled" + ] + }, + "extraContainers": { + "type": "array" + }, + "readinessProbe": { + "type": "object", + "properties": { + "initialDelaySeconds": { + "type": "integer" + }, + "periodSeconds": { + "type": "integer" + }, + "failureThreshold": { + "type": "integer" + }, + "httpGet": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "port": { + "type": "integer" + } + }, + "required": [ + "path", + "port" + ] + } + }, + "required": [ + "failureThreshold", + "httpGet", + "initialDelaySeconds", + "periodSeconds" + ] + }, + "livenessProbe": { + "type": "object", + "properties": { + "initialDelaySeconds": { + "type": "integer" + }, + "failureThreshold": { + "type": "integer" + }, + "periodSeconds": { + "type": "integer" + }, + "httpGet": { + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "port": { + "type": "integer" + } + }, + "required": [ + "path", + "port" + ] + } + }, + "required": [ + "failureThreshold", + "httpGet", + "initialDelaySeconds", + "periodSeconds" + ] + }, + "labels": { + "type": "object", + "properties": { + "environment": { + "type": "string" + 
}, + "release": { + "type": "string" + } + }, + "required": [ + "environment", + "release" + ] + } + }, + "required": [ + "autoscaling", + "configs", + "containerPort", + "customObjects", + "deploymentStrategy", + "externalConfigs", + "extraContainers", + "extraInit", + "extraPorts", + "gpuModels", + "image", + "labels", + "livenessProbe", + "maxUnavailablePodDisruptionBudget", + "readinessProbe", + "replicaCount", + "resources", + "secrets", + "servicePort" + ] +} \ No newline at end of file diff --git a/examples/chart-helm/values.yaml b/examples/chart-helm/values.yaml new file mode 100644 index 0000000000000..9c48e7d061bf7 --- /dev/null +++ b/examples/chart-helm/values.yaml @@ -0,0 +1,119 @@ +# -- Default values for chart vllm +# -- Declare variables to be passed into your templates. + +# -- Image configuration +image: + # -- Image repository + repository: "vllm/vllm-openai" + # -- Image tag + tag: "latest" + # -- Container launch command + command: ["vllm", "serve", "/data/", "--served-model-name", "opt-125m", "--dtype", "bfloat16", "--host", "0.0.0.0", "--port", "8000"] + +# -- Container port +containerPort: 8000 +# -- Service name +serviceName: +# -- Service port +servicePort: 80 +# -- Additional ports configuration +extraPorts: [] + +# -- Number of replicas +replicaCount: 1 + +# -- Deployment strategy configuration +deploymentStrategy: {} + +# -- Resource configuration +resources: + requests: + # -- Number of CPUs + cpu: 4 + # -- CPU memory configuration + memory: 16Gi + # -- Number of gpus used + nvidia.com/gpu: 1 + limits: + # -- Number of CPUs + cpu: 4 + # -- CPU memory configuration + memory: 16Gi + # -- Number of gpus used + nvidia.com/gpu: 1 + +# -- Type of gpu used +gpuModels: + - "TYPE_GPU_USED" + +# -- Autoscaling configuration +autoscaling: + # -- Enable autoscaling + enabled: false + # -- Minimum replicas + minReplicas: 1 + # -- Maximum replicas + maxReplicas: 100 + # -- Target CPU utilization for autoscaling + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +# -- Configmap +configs: {} + +# -- Secrets configuration +secrets: {} + +# -- External configuration +externalConfigs: [] + +# -- Custom Objects configuration +customObjects: [] + +# -- Disruption Budget Configuration +maxUnavailablePodDisruptionBudget: "" + +# -- Additional configuration for the init container +extraInit: + # -- Path of the model on the s3 which hosts model weights and config files + s3modelpath: "relative_s3_model_path/opt-125m" + # -- Storage size of the s3 + pvcStorage: "1Gi" + awsEc2MetadataDisabled: true + +# -- Additional containers configuration +extraContainers: [] + +# -- Readiness probe configuration +readinessProbe: + # -- Number of seconds after the container has started before readiness probe is initiated + initialDelaySeconds: 5 + # -- How often (in seconds) to perform the readiness probe + periodSeconds: 5 + # -- Number of times after which if a probe fails in a row, Kubernetes considers that the overall check has failed: the container is not ready + failureThreshold: 3 + # -- Configuration of the Kubelet http request on the server + httpGet: + # -- Path to access on the HTTP server + path: /health + # -- Name or number of the port to access on the container, on which the server is listening + port: 8000 + +# -- Liveness probe configuration +livenessProbe: + # -- Number of seconds after the container has started before liveness probe is initiated + initialDelaySeconds: 15 + # -- Number of times after which if a probe fails in a row, Kubernetes considers 
that the overall check has failed: the container is not alive
+  failureThreshold: 3
+  # -- How often (in seconds) to perform the liveness probe
+  periodSeconds: 10
+  # -- Configuration of the Kubelet http request on the server
+  httpGet:
+    # -- Path to access on the HTTP server
+    path: /health
+    # -- Name or number of the port to access on the container, on which the server is listening
+    port: 8000
+
+labels:
+  environment: "test"
+  release: "test"
diff --git a/examples/disaggregated_prefill.sh b/examples/disaggregated_prefill.sh
new file mode 100644
index 0000000000000..87155273a81d1
--- /dev/null
+++ b/examples/disaggregated_prefill.sh
@@ -0,0 +1,109 @@
+#!/bin/bash
+# This file demonstrates example usage of disaggregated prefilling.
+# We launch 2 vLLM instances (1 for prefill and 1 for decode),
+# and then transfer the KV cache between them.
+
+echo "🚧🚧 Warning: The usage of disaggregated prefill is experimental and subject to change 🚧🚧"
+sleep 1
+
+# Trap the SIGINT signal (triggered by Ctrl+C)
+trap 'cleanup' INT
+
+# Cleanup function
+cleanup() {
+    echo "Caught Ctrl+C, cleaning up..."
+    # Cleanup commands
+    pgrep python | xargs kill -9
+    pkill -f python
+    echo "Cleanup complete. Exiting."
+    exit 0
+}
+
+export VLLM_HOST_IP=$(hostname -I | awk '{print $1}')
+
+# Install quart first -- required for the disagg prefill proxy server
+if python3 -c "import quart" &> /dev/null; then
+    echo "Quart is already installed."
+else
+    echo "Quart is not installed. Installing..."
+    python3 -m pip install quart
+fi
+
+# A function that waits for a vLLM server to start
+wait_for_server() {
+  local port=$1
+  timeout 1200 bash -c "
+    until curl -s localhost:${port}/v1/completions > /dev/null; do
+      sleep 1
+    done" && return 0 || return 1
+}
+
+
+# You can also adjust --kv-ip and --kv-port for distributed inference.
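(A note on the proxy this script launches further down: it forwards each request to the prefill instance with `max_tokens` forced to 1, then replays the original request against the decode instance. A minimal sketch of that flow -- assuming non-streaming `/v1/completions`, the ports used in this script, and `aiohttp` for the outbound calls; the actual implementation lives in `benchmarks/disagg_benchmarks/disagg_prefill_proxy_server.py`:)

```python
# Minimal sketch of a disaggregated-prefill proxy (simplified, non-streaming).
# Assumptions: quart and aiohttp installed; prefill on :8100, decode on :8200.
import aiohttp
from quart import Quart, request

app = Quart(__name__)

PREFILL_URL = "http://localhost:8100/v1/completions"
DECODE_URL = "http://localhost:8200/v1/completions"


@app.post("/v1/completions")
async def completions():
    payload = await request.get_json()

    async with aiohttp.ClientSession() as session:
        # Step 1: prefill-only pass -- max_tokens=1 so the KV producer mostly
        # builds the KV cache, which is transferred to the consumer.
        prefill_payload = dict(payload, max_tokens=1)
        async with session.post(PREFILL_URL, json=prefill_payload) as resp:
            await resp.json()

        # Step 2: replay the original request on the decode instance,
        # which reuses the transferred KV cache.
        async with session.post(DECODE_URL, json=payload) as resp:
            return await resp.json()


if __name__ == "__main__":
    app.run(port=8000)
```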
+ +# prefilling instance, which is the KV producer +CUDA_VISIBLE_DEVICES=0 vllm serve meta-llama/Meta-Llama-3.1-8B-Instruct \ + --port 8100 \ + --max-model-len 100 \ + --gpu-memory-utilization 0.8 \ + --kv-transfer-config \ + '{"kv_connector":"PyNcclConnector","kv_role":"kv_producer","kv_rank":0,"kv_parallel_size":2}' & + +# decoding instance, which is the KV consumer +CUDA_VISIBLE_DEVICES=1 vllm serve meta-llama/Meta-Llama-3.1-8B-Instruct \ + --port 8200 \ + --max-model-len 100 \ + --gpu-memory-utilization 0.8 \ + --kv-transfer-config \ + '{"kv_connector":"PyNcclConnector","kv_role":"kv_consumer","kv_rank":1,"kv_parallel_size":2}' & + +# wait until prefill and decode instances are ready +wait_for_server 8100 +wait_for_server 8200 + +# launch a proxy server that opens the service at port 8000 +# the workflow of this proxy: +# - send the request to prefill vLLM instance (port 8100), change max_tokens +# to 1 +# - after the prefill vLLM finishes prefill, send the request to decode vLLM +# instance +# NOTE: the usage of this API is subject to change --- in the future we will +# introduce "vllm connect" to connect between prefill and decode instances +python3 ../benchmarks/disagg_benchmarks/disagg_prefill_proxy_server.py & +sleep 1 + +# serve two example requests +output1=$(curl -X POST -s http://localhost:8000/v1/completions \ +-H "Content-Type: application/json" \ +-d '{ +"model": "meta-llama/Meta-Llama-3.1-8B-Instruct", +"prompt": "San Francisco is a", +"max_tokens": 10, +"temperature": 0 +}') + +output2=$(curl -X POST -s http://localhost:8000/v1/completions \ +-H "Content-Type: application/json" \ +-d '{ +"model": "meta-llama/Meta-Llama-3.1-8B-Instruct", +"prompt": "Santa Clara is a", +"max_tokens": 10, +"temperature": 0 +}') + + +# Cleanup commands +pgrep python | xargs kill -9 +pkill -f python + +echo "" + +sleep 1 + +# Print the outputs of the curl requests +echo "" +echo "Output of first request: $output1" +echo "Output of second request: $output2" + +echo "🎉🎉 Successfully finished 2 test requests! 
🎉🎉" +echo "" diff --git a/examples/offline_inference_audio_language.py b/examples/offline_inference_audio_language.py index 050b791b62adb..68b786961b14a 100644 --- a/examples/offline_inference_audio_language.py +++ b/examples/offline_inference_audio_language.py @@ -25,16 +25,16 @@ def run_ultravox(question: str, audio_count: int): tokenizer = AutoTokenizer.from_pretrained(model_name) messages = [{ - 'role': - 'user', - 'content': - "<|reserved_special_token_0|>\n" * audio_count + question + 'role': 'user', + 'content': "<|audio|>\n" * audio_count + question }] prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) - llm = LLM(model=model_name, limit_mm_per_prompt={"audio": audio_count}) + llm = LLM(model=model_name, + trust_remote_code=True, + limit_mm_per_prompt={"audio": audio_count}) stop_token_ids = None return llm, prompt, stop_token_ids diff --git a/examples/offline_inference_chat.py b/examples/offline_inference_chat.py index 8814f4d7bef0d..de565eb47819f 100644 --- a/examples/offline_inference_chat.py +++ b/examples/offline_inference_chat.py @@ -1,7 +1,7 @@ from vllm import LLM, SamplingParams -llm = LLM(model="meta-llama/Meta-Llama-3-8B-Instruct") -sampling_params = SamplingParams(temperature=0.5) +# llm = LLM(model="meta-llama/Llama-3.2-1B-Instruct") +sampling_params = SamplingParams(temperature=0.5, top_k=8, max_tokens=300) def print_outputs(outputs): @@ -14,9 +14,14 @@ def print_outputs(outputs): print("=" * 80) -# In this script, we demonstrate how to pass input to the chat method: +# outputs = llm.generate(["The theory of relativity states that"], +# sampling_params=sampling_params, +# use_tqdm=False) +# print_outputs(outputs) -conversation = [ +# # In this script, we demonstrate how to pass input to the chat method: +llm = LLM(model="meta-llama/Llama-3.1-8B-Instruct", tensor_parallel_size=1) +conversation1 = [ { "role": "system", "content": "You are a helpful assistant" @@ -31,40 +36,45 @@ def print_outputs(outputs): }, { "role": "user", - "content": "Write an essay about the importance of higher education.", + "content": "Write a short essay about the importance of higher education.", }, ] -outputs = llm.chat(conversation, - sampling_params=sampling_params, - use_tqdm=False) + +conversations = [conversation1 for _ in range(100)] +import time + +start = time.time() +outputs = llm.chat(conversations,sampling_params=sampling_params,use_tqdm=True) print_outputs(outputs) +end = time.time() +print(end-start) # You can run batch inference with llm.chat API -conversation = [ - { - "role": "system", - "content": "You are a helpful assistant" - }, - { - "role": "user", - "content": "Hello" - }, - { - "role": "assistant", - "content": "Hello! How can I assist you today?" - }, - { - "role": "user", - "content": "Write an essay about the importance of higher education.", - }, -] -conversations = [conversation for _ in range(10)] +# conversation2 = [ +# { +# "role": "system", +# "content": "You are a helpful assistant" +# }, +# { +# "role": "user", +# "content": "Hello" +# }, +# { +# "role": "assistant", +# "content": "Hello! How can I assist you today?" 
+# }, +# { +# "role": "user", +# "content": "Write an essay about the importance of playing video games!", +# }, +# ] +# conversations = [conversation1, conversation2] # We turn on tqdm progress bar to verify it's indeed running batch inference -outputs = llm.chat(messages=conversations, - sampling_params=sampling_params, - use_tqdm=True) -print_outputs(outputs) +# outputs = llm.chat(messages=conversations, +# sampling_params=sampling_params, +# use_tqdm=True) +# print_outputs(outputs) # A chat template can be optionally supplied. # If not, the model will use its default chat template. diff --git a/examples/offline_inference_classification.py b/examples/offline_inference_classification.py new file mode 100644 index 0000000000000..de539b639a196 --- /dev/null +++ b/examples/offline_inference_classification.py @@ -0,0 +1,28 @@ +from vllm import LLM + +# Sample prompts. +prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", +] + +# Create an LLM. +# You should pass task="classify" for classification models +model = LLM( + model="jason9693/Qwen2.5-1.5B-apeach", + task="classify", + enforce_eager=True, +) + +# Generate logits. The output is a list of ClassificationRequestOutputs. +outputs = model.classify(prompts) + +# Print the outputs. +for prompt, output in zip(prompts, outputs): + probs = output.outputs.probs + probs_trimmed = ((str(probs[:16])[:-1] + + ", ...]") if len(probs) > 16 else probs) + print(f"Prompt: {prompt!r} | " + f"Class Probabilities: {probs_trimmed} (size={len(probs)})") diff --git a/examples/offline_inference_embedding.py b/examples/offline_inference_embedding.py index 7d5ef128bc8e0..58d004313ad51 100644 --- a/examples/offline_inference_embedding.py +++ b/examples/offline_inference_embedding.py @@ -9,9 +9,20 @@ ] # Create an LLM. -model = LLM(model="intfloat/e5-mistral-7b-instruct", enforce_eager=True) +# You should pass task="embed" for embedding models +model = LLM( + model="intfloat/e5-mistral-7b-instruct", + task="embed", + enforce_eager=True, +) + # Generate embedding. The output is a list of EmbeddingRequestOutputs. -outputs = model.encode(prompts) +outputs = model.embed(prompts) + # Print the outputs. -for output in outputs: - print(output.outputs.embedding) # list of 4096 floats +for prompt, output in zip(prompts, outputs): + embeds = output.outputs.embedding + embeds_trimmed = ((str(embeds[:16])[:-1] + + ", ...]") if len(embeds) > 16 else embeds) + print(f"Prompt: {prompt!r} | " + f"Embeddings: {embeds_trimmed} (size={len(embeds)})") diff --git a/examples/offline_inference_openai.md b/examples/offline_inference_openai.md index 4c64197975534..2436417cb543a 100644 --- a/examples/offline_inference_openai.md +++ b/examples/offline_inference_openai.md @@ -1,45 +1,48 @@ # Offline Inference with the OpenAI Batch file format - **NOTE:** This is a guide to performing batch inference using the OpenAI batch file format, **NOT** the complete Batch (REST) API. - - ## File Format - - The OpenAI batch file format consists of a series of json objects on new lines. +```{important} +This is a guide to performing batch inference using the OpenAI batch file format, **not** the complete Batch (REST) API. +``` + +## File Format - [See here for an example file.](https://github.com/vllm-project/vllm/blob/main/examples/openai_example_batch.jsonl) +The OpenAI batch file format consists of a series of json objects on new lines. - Each line represents a separate request. 
See the [OpenAI package reference](https://platform.openai.com/docs/api-reference/batch/requestInput) for more details. +[See here for an example file.](https://github.com/vllm-project/vllm/blob/main/examples/openai_example_batch.jsonl) - **NOTE:** We currently only support `/v1/chat/completions` and `/v1/embeddings` endpoints (completions coming soon). +Each line represents a separate request. See the [OpenAI package reference](https://platform.openai.com/docs/api-reference/batch/requestInput) for more details. - ## Pre-requisites +```{note} +We currently only support `/v1/chat/completions` and `/v1/embeddings` endpoints (completions coming soon). +``` -* Ensure you are using `vllm >= 0.4.3`. You can check by running `python -c "import vllm; print(vllm.__version__)"`. +## Pre-requisites + * The examples in this document use `meta-llama/Meta-Llama-3-8B-Instruct`. - Create a [user access token](https://huggingface.co/docs/hub/en/security-tokens) - Install the token on your machine (Run `huggingface-cli login`). - Get access to the gated model by [visiting the model card](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) and agreeing to the terms and conditions. - ## Example 1: Running with a local file - - ### Step 1: Create your batch file - - To follow along with this example, you can download the example batch, or create your own batch file in your working directory. - - ``` - wget https://raw.githubusercontent.com/vllm-project/vllm/main/examples/openai_example_batch.jsonl - ``` - - Once you've created your batch file it should look like this - - ``` - $ cat openai_example_batch.jsonl +## Example 1: Running with a local file + +### Step 1: Create your batch file + +To follow along with this example, you can download the example batch, or create your own batch file in your working directory. + +``` +wget https://raw.githubusercontent.com/vllm-project/vllm/main/examples/openai_example_batch.jsonl +``` + +Once you've created your batch file it should look like this + +``` +$ cat openai_example_batch.jsonl {"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "messages": [{"role": "system", "content": "You are a helpful assistant."},{"role": "user", "content": "Hello world!"}],"max_completion_tokens": 1000}} {"custom_id": "request-2", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "messages": [{"role": "system", "content": "You are an unhelpful assistant."},{"role": "user", "content": "Hello world!"}],"max_completion_tokens": 1000}} - ``` - - ### Step 2: Run the batch +``` + +### Step 2: Run the batch The batch running tool is designed to be used from the command line. @@ -85,18 +88,18 @@ To integrate with cloud blob storage, we recommend using presigned urls. ### Step 1: Upload your input script To follow along with this example, you can download the example batch, or create your own batch file in your working directory. 
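(If you prefer to create the batch file programmatically rather than downloading it, a minimal sketch -- the request shape mirrors the JSONL shown above; the model and file names are just the ones used in this guide:)

```python
# Sketch: write an OpenAI-format batch file, one JSON request per line.
import json

requests = [
    {
        "custom_id": "request-1",
        "method": "POST",
        "url": "/v1/chat/completions",
        "body": {
            "model": "meta-llama/Meta-Llama-3-8B-Instruct",
            "messages": [
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": "Hello world!"},
            ],
            "max_completion_tokens": 1000,
        },
    },
]

with open("openai_example_batch.jsonl", "w") as f:
    for req in requests:
        f.write(json.dumps(req) + "\n")
```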
- - ``` - wget https://raw.githubusercontent.com/vllm-project/vllm/main/examples/openai_example_batch.jsonl - ``` - - Once you've created your batch file it should look like this - - ``` - $ cat openai_example_batch.jsonl + +``` +wget https://raw.githubusercontent.com/vllm-project/vllm/main/examples/openai_example_batch.jsonl +``` + +Once you've created your batch file it should look like this + +``` +$ cat openai_example_batch.jsonl {"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "messages": [{"role": "system", "content": "You are a helpful assistant."},{"role": "user", "content": "Hello world!"}],"max_completion_tokens": 1000}} {"custom_id": "request-2", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "messages": [{"role": "system", "content": "You are an unhelpful assistant."},{"role": "user", "content": "Hello world!"}],"max_completion_tokens": 1000}} - ``` +``` Now upload your batch file to your S3 bucket. @@ -104,7 +107,6 @@ Now upload your batch file to your S3 bucket. aws s3 cp openai_example_batch.jsonl s3://MY_BUCKET/MY_INPUT_FILE.jsonl ``` - ### Step 2: Generate your presigned urls Presigned urls can only be generated via the SDK. You can run the following python script to generate your presigned urls. Be sure to replace the `MY_BUCKET`, `MY_INPUT_FILE.jsonl`, and `MY_OUTPUT_FILE.jsonl` placeholders with your bucket and file names. @@ -179,21 +181,19 @@ aws s3 cp s3://MY_BUCKET/MY_OUTPUT_FILE.jsonl - ### Step 1: Create your batch file - Add embedding requests to your batch file. The following is an example: +Add embedding requests to your batch file. The following is an example: - ``` - {"custom_id": "request-1", "method": "POST", "url": "/v1/embeddings", "body": {"model": "intfloat/e5-mistral-7b-instruct", "input": "You are a helpful assistant."}} +``` +{"custom_id": "request-1", "method": "POST", "url": "/v1/embeddings", "body": {"model": "intfloat/e5-mistral-7b-instruct", "input": "You are a helpful assistant."}} {"custom_id": "request-2", "method": "POST", "url": "/v1/embeddings", "body": {"model": "intfloat/e5-mistral-7b-instruct", "input": "You are an unhelpful assistant."}} ``` - - You can even mix chat completion and embedding requests in the batch file, as long as the model you are using supports both chat completion and embeddings (note that all requests must use the same model). +You can even mix chat completion and embedding requests in the batch file, as long as the model you are using supports both chat completion and embeddings (note that all requests must use the same model). - ### Step 2: Run the batch +### Step 2: Run the batch You can run the batch using the same command as in earlier examples. 
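(For reference, "the same command" is the `vllm.entrypoints.openai.run_batch` entrypoint shown in Example 1, pointed at the embedding model used above; file names here are placeholders:)

```
python -m vllm.entrypoints.openai.run_batch -i openai_example_batch.jsonl -o results.jsonl --model intfloat/e5-mistral-7b-instruct
```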
- ### Step 3: Check your results You can check your results by running `cat results.jsonl` @@ -201,5 +201,5 @@ You can check your results by running `cat results.jsonl` ``` $ cat results.jsonl {"id":"vllm-db0f71f7dec244e6bce530e0b4ef908b","custom_id":"request-1","response":{"status_code":200,"request_id":"vllm-batch-3580bf4d4ae54d52b67eee266a6eab20","body":{"id":"embd-33ac2efa7996430184461f2e38529746","object":"list","created":444647,"model":"intfloat/e5-mistral-7b-instruct","data":[{"index":0,"object":"embedding","embedding":[0.016204833984375,0.0092010498046875,0.0018358230590820312,-0.0028228759765625,0.001422882080078125,-0.0031147003173828125,...]}],"usage":{"prompt_tokens":8,"total_tokens":8,"completion_tokens":0}}},"error":null} -...``` +... ``` diff --git a/examples/offline_inference_scoring.py b/examples/offline_inference_scoring.py new file mode 100644 index 0000000000000..5da9e710959b5 --- /dev/null +++ b/examples/offline_inference_scoring.py @@ -0,0 +1,23 @@ +from vllm import LLM + +# Sample prompts. +text_1 = "What is the capital of France?" +texts_2 = [ + "The capital of Brazil is Brasilia.", "The capital of France is Paris." +] + +# Create an LLM. +# You should pass task="score" for cross-encoder models +model = LLM( + model="BAAI/bge-reranker-v2-m3", + task="score", + enforce_eager=True, +) + +# Generate scores. The output is a list of ScoringRequestOutputs. +outputs = model.score(text_1, texts_2) + +# Print the outputs. +for text_2, output in zip(texts_2, outputs): + score = output.outputs.score + print(f"Pair: {[text_1, text_2]!r} | Score: {score}") diff --git a/examples/offline_inference_vision_language.py b/examples/offline_inference_vision_language.py index f08f22eec164a..6d0495fdd4054 100644 --- a/examples/offline_inference_vision_language.py +++ b/examples/offline_inference_vision_language.py @@ -5,6 +5,8 @@ For most models, the prompt format should follow corresponding examples on HuggingFace model repository. """ +import random + from transformers import AutoTokenizer from vllm import LLM, SamplingParams @@ -17,13 +19,168 @@ # Unless specified, these settings have been tested to work on a single L4. +# Aria +def run_aria(question: str, modality: str): + assert modality == "image" + model_name = "rhymes-ai/Aria" + + llm = LLM(model=model_name, + tokenizer_mode="slow", + trust_remote_code=True, + dtype="bfloat16", + mm_cache_preprocessor=args.mm_cache_preprocessor) + + prompt = (f"<|im_start|>user\n<|img|>\n{question}" + "<|im_end|>\n<|im_start|>assistant\n") + + stop_token_ids = [93532, 93653, 944, 93421, 1019, 93653, 93519] + return llm, prompt, stop_token_ids + + +# BLIP-2 +def run_blip2(question: str, modality: str): + assert modality == "image" + + # BLIP-2 prompt format is inaccurate on HuggingFace model repository. 
+    # See https://huggingface.co/Salesforce/blip2-opt-2.7b/discussions/15#64ff02f3f8cf9e4f5b038262 #noqa
+    prompt = f"Question: {question} Answer:"
+    llm = LLM(model="Salesforce/blip2-opt-2.7b",
+              mm_cache_preprocessor=args.mm_cache_preprocessor)
+    stop_token_ids = None
+    return llm, prompt, stop_token_ids
+
+
+# Chameleon
+def run_chameleon(question: str, modality: str):
+    assert modality == "image"
+
+    prompt = f"{question}<image>"
+    llm = LLM(model="facebook/chameleon-7b",
+              max_model_len=4096,
+              mm_cache_preprocessor=args.mm_cache_preprocessor)
+    stop_token_ids = None
+    return llm, prompt, stop_token_ids
+
+
+# Fuyu
+def run_fuyu(question: str, modality: str):
+    assert modality == "image"
+
+    prompt = f"{question}\n"
+    llm = LLM(model="adept/fuyu-8b",
+              max_model_len=2048,
+              max_num_seqs=2,
+              mm_cache_preprocessor=args.mm_cache_preprocessor)
+    stop_token_ids = None
+    return llm, prompt, stop_token_ids
+
+
+# GLM-4v
+def run_glm4v(question: str, modality: str):
+    assert modality == "image"
+    model_name = "THUDM/glm-4v-9b"
+
+    llm = LLM(model=model_name,
+              max_model_len=2048,
+              max_num_seqs=2,
+              trust_remote_code=True,
+              enforce_eager=True,
+              mm_cache_preprocessor=args.mm_cache_preprocessor)
+    prompt = question
+    stop_token_ids = [151329, 151336, 151338]
+    return llm, prompt, stop_token_ids
+
+
+# H2OVL-Mississippi
+def run_h2ovl(question: str, modality: str):
+    assert modality == "image"
+
+    model_name = "h2oai/h2ovl-mississippi-2b"
+
+    llm = LLM(
+        model=model_name,
+        trust_remote_code=True,
+        max_model_len=8192,
+        mm_cache_preprocessor=args.mm_cache_preprocessor,
+    )
+
+    tokenizer = AutoTokenizer.from_pretrained(model_name,
+                                              trust_remote_code=True)
+    messages = [{'role': 'user', 'content': f"<image>\n{question}"}]
+    prompt = tokenizer.apply_chat_template(messages,
+                                           tokenize=False,
+                                           add_generation_prompt=True)
+
+    # Stop tokens for H2OVL-Mississippi
+    # https://huggingface.co/h2oai/h2ovl-mississippi-2b
+    stop_token_ids = [tokenizer.eos_token_id]
+    return llm, prompt, stop_token_ids
+
+
+# Idefics3-8B-Llama3
+def run_idefics3(question: str, modality: str):
+    assert modality == "image"
+    model_name = "HuggingFaceM4/Idefics3-8B-Llama3"
+
+    llm = LLM(
+        model=model_name,
+        max_model_len=8192,
+        max_num_seqs=2,
+        enforce_eager=True,
+        # if you are running out of memory, you can reduce the "longest_edge".
+        # see: https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3#model-optimizations
+        mm_processor_kwargs={
+            "size": {
+                "longest_edge": 3 * 364
+            },
+        },
+        mm_cache_preprocessor=args.mm_cache_preprocessor,
+    )
+    prompt = (
+        f"<|begin_of_text|>User:<image>{question}<end_of_utterance>\nAssistant:"
+    )
+    stop_token_ids = None
+    return llm, prompt, stop_token_ids
+
+
+# InternVL
+def run_internvl(question: str, modality: str):
+    assert modality == "image"
+
+    model_name = "OpenGVLab/InternVL2-2B"
+
+    llm = LLM(
+        model=model_name,
+        trust_remote_code=True,
+        max_model_len=4096,
+        mm_cache_preprocessor=args.mm_cache_preprocessor,
+    )
+
+    tokenizer = AutoTokenizer.from_pretrained(model_name,
+                                              trust_remote_code=True)
+    messages = [{'role': 'user', 'content': f"<image>\n{question}"}]
+    prompt = tokenizer.apply_chat_template(messages,
+                                           tokenize=False,
+                                           add_generation_prompt=True)
+
+    # Stop tokens for InternVL
+    # model variants may have different stop tokens;
+    # please refer to the model card for the correct "stop words":
+    # https://huggingface.co/OpenGVLab/InternVL2-2B/blob/main/conversation.py
+    stop_tokens = ["<|endoftext|>", "<|im_start|>", "<|im_end|>", "<|end|>"]
+    stop_token_ids = [tokenizer.convert_tokens_to_ids(i) for i in stop_tokens]
+    return llm, prompt, stop_token_ids
+
+
 # LLaVA-1.5
 def run_llava(question: str, modality: str):
     assert modality == "image"
 
     prompt = f"USER: <image>\n{question}\nASSISTANT:"
 
-    llm = LLM(model="llava-hf/llava-1.5-7b-hf", max_model_len=4096)
+    llm = LLM(model="llava-hf/llava-1.5-7b-hf",
+              max_model_len=4096,
+              mm_cache_preprocessor=args.mm_cache_preprocessor)
     stop_token_ids = None
     return llm, prompt, stop_token_ids
 
@@ -33,7 +190,9 @@ def run_llava_next(question: str, modality: str):
     assert modality == "image"
 
     prompt = f"[INST] <image>\n{question} [/INST]"
-    llm = LLM(model="llava-hf/llava-v1.6-mistral-7b-hf", max_model_len=8192)
+    llm = LLM(model="llava-hf/llava-v1.6-mistral-7b-hf",
+              max_model_len=8192,
+              mm_cache_preprocessor=args.mm_cache_preprocessor)
     stop_token_ids = None
     return llm, prompt, stop_token_ids
 
@@ -44,7 +203,9 @@ def run_llava_next_video(question: str, modality: str):
     assert modality == "video"
 
     prompt = f"USER: <video>\n{question} ASSISTANT:"
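(The rest of this example file follows the same pattern: each `run_*` helper returns `(llm, prompt, stop_token_ids)`, and the file's entry point selects one from a `model_example_map` and feeds it to `llm.generate`. A minimal sketch of that dispatch, assuming the helpers defined above, vLLM's bundled `ImageAsset`, and that `args.mm_cache_preprocessor` has been populated by the file's argument parser:)

```python
# Illustrative dispatch over the run_* helpers defined above
# (the real file builds a model_example_map covering every model).
from vllm import SamplingParams
from vllm.assets.image import ImageAsset

model_example_map = {
    "llava": run_llava,
    "internvl_chat": run_internvl,
}

llm, prompt, stop_token_ids = model_example_map["llava"](
    "What is the content of this image?", "image")

sampling_params = SamplingParams(temperature=0.2,
                                 max_tokens=64,
                                 stop_token_ids=stop_token_ids)

image = ImageAsset("cherry_blossom").pil_image.convert("RGB")
outputs = llm.generate(
    {
        "prompt": prompt,
        "multi_modal_data": {"image": image},
    },
    sampling_params=sampling_params)

for o in outputs:
    print(o.outputs[0].text)
```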
zj#J=qastNnty{)?BWx#C0peShF|;w3uexGruZ|asbrMz<;Lr(hcurj}WJYzuPm-EI zyJJg@fcjdr^CiH5aLe-m%H-m^<7WU?OVQoD+7jAGnyuyFH|xlzTZ7co3tbe!-!R_>smnkMbPtOpeyBl1|nMilNYNhXjG{lbc-c|2n zO8tMGf68jVqOG@*=H$5*cM^=A2CgVyZrI>&5Tieo&TZ%G;|VIKiqQ|?=JJMH4OeiQ zpXYEwDJdCsdhK^~4Q$N&KccySQtDW#H7p~!-80TuCl6tPpW1j0P80AL^%mH#Sh!{< z+v=wgE#ACuyYcu_S(0|i22e08{$~ZzKll;KeM%9V9Lo_ZR8IGz81cnBAyHI$> zKa6jVgYxvR-sLggC5iFlGB$sGOnrL!W1QW;$}DYN3DdYwsd%+^f=$1>R5e!Pj>2Vo-uS@Y%%$<~C&DT3}* z@qx@0F~1yNoCeGdS#MDdb`fgk4pH3WmmTiK^1E@ETyF&d`1Yg2gWKe+{p9DtwzW72 z*t)BTWJ*XV4l13weM`rg8{AM4laQFgqP&px7R0`%08e!b(X(EUTQdDCS?ZU|<3PMm zMFfK0)5Sns@d;l`j!kPhwh7e&<1Jf`R6QAyKi>kPy8MaJdvt3B8}EOEL@V*0l+J#> z-ymIklnF|l{HkSfJd7Do%f8J;t}-dCRsGfF{-0T=`s4V0p)pVPRnW`^5|j5Os*<06Tw1~| z+$m|9dpYWM2G4m_Jw95~2ZIsH!gJYBH5l|1MJ%l6(03kjC2AdO)_$|w0>K#Q-S}W= zbf>%9>1!$-^h&*kiU^>O1CEs`>NY6<)lZ(xZn-D-M^l@Iq+hocS|}?!rqLcn(F+R; z&o#OE=RO!+er;`}4P?C^Qs!X$bCcb3O-)y-{#P0yAiY)|1#HXt7B^3T;?>CrRQnmt zXAwHYWKC06MtI1%Z;3rh=a@?98F(an_ljP>Hd$m)s#vAS6KbC0w>tuxg$Pnb?zt`p zF1E1Zf~nk}fleB)A7{0rx!Ncog4yPD$&C`?m+q5xFzCs$MTz{CO6J43w}z&$rz%O) z$`@2?*VI^}AVA?Xruh>5b03y#lPwg6T1!K%xKL;+M1CF1tnqXsheZ#cgi|K^YS$OF z5;@pzeoi=T{FE*~$iIRmY$HHbn9=Rqu~Vk&$JOz1PR=0<^QrFgHqTQuNg~G2O^jPX zOm-%pFP91b&A0dO*=#UxxjS)}^RE3jDfEArc2h4w|GHQ7M+$dZ3V62cGAZ2xU?A~- zger1x!Ock@((Qa;0QUZWI1K&?os(*~OMzT%ne$vSyZqAvQX{IxT+gbT{EJPW&nJDQ%*BK2nCd+VkjWa za-06Y9Nqu21bjcM(*W!v`T>Qz5obgeFl_-cwz1m3S)%`CW!-B-us;jXL;d%kL}wiH zQzKKOERJAAe6*bi+4HvqeA(=#^M9<||8=3XqdY`0#D)W#jsG9)y?0nsYq|#-1jGVF z1f&xc6cG^V(h?gWBA|lOi3);r0jY+BBE3XVKv4+2NQp?Vp%($^AVnZSr9*-c0wnRS zJv004;@)#-&N{iyz2E!(O6xQ~YJL&8HK->GaX+!de?MRrdWY;t zu9j$p;UnvT=0g845QQnaW+>9QCM^mo*Ov8)oIe_GX#EVIe+Tp(ge&!P777|GnK*^T zsEO=KBlJjHp@j--?8wpa^0EwzwRPzNs=9;dO(##y@g~cM?;Cc5%>Jp}{&*nJ3iih3 zv6V+g5vXP!1n{+Hya5VZ#3Z1a1dzIa9Qx}Kj?ZJcfJF8T1?VSe-2zDRcm=1w(#C%r z+nH2l`Cs)tzgZkFO9;BRRh|5$KG~MH%iQ8>v8D`t@Q_6NC|QJ3>^-3%J^re7zzkDc zjf`c^#02HPM&7Ez1K%Bhcx?K($uxv;$faNjMX8zRp0$|{CCcm-qUMrCz_x7KgxP-Z zSV<5Gyz*d5emI+mZNRddBW{!Ipk`AOjKYyQ&!}EJS_fR8dZx`~&v-)%ch!-dZrvRVyAvRRZ0HSi{nE!>FhK5_$6nIX#4V6$N!Yq<$5p6BZi4jdb(g=HGgCs~U) zdiwfRUpNdt{|3aJZ+=VSYWCOIh%f-n7jV}^4%N}+0u&fls7t2rY`%kH6Q=2P6WfD~ zra-i0AyoW8S^1IHavt&q^UjN!)Z2*L<&<}!9Sf%Rd6{_$hx zKk{X{?toio0g9eQHaWm8_89`)UFM0~2mF|aih{IN zD>eJd*$KdTRz;^|a54l-F17gko)Z(?Q-$?N_lE+WK`=X*|FMv?OcSaPF;iW7!>?TB zvun&6!}iH@4cpLr_Gp~+d{?eqW(*VgtoazKc=D$!zq-&zUIiG!6w582PF zs9|#_N>B~A53|ha13}W33;8Z~|cSQKmkHX-^J&HhE*YxG}&DFp0-<{^e1a zRJdUEJR}^uZy4u<-D{uJk^-UFWjUaDsMl)!&`qBDQnh|1CpsCpQR94Y-wiMrXqPA> z!4vj}7LgJaBXrTl{upUBc)srA557+(d5CQi^Eg&-ol8W$XGh}kEBwtD2SvUB>RQ$n z;eJg=85MdZ%}@uV3@r4YIwAK^k+f7`ItmUYIG}>24!xqegsO?0tsJ(!G<>pdH*XRm zeRQtQ4+1E|`Vm={xAD`xhsFAgziI9Kq_^{T{(cqTN6LQH)ym#m23$#S ztIH2YTtFpZ__tk5ng4(1TsVFcj^{e%p`W zH}lwd7=Ct|4L5(A5)Mo>$vqGvX)R=WLjrO6C$h`eNN9tfkp?0Ukna$t0F(H|iWYsU z=<0*t@85*XxL7UmMW5om)$FpvU4I;f7jxJ8&F<5C-$HKn!vWE^-yG`i_J73Bf2XMa z4yASXvNv{yr8Pev73S*~OOYd-OPJ6i-Fl)2(W;eUJ_}#h>=cwS0fDU9TclvW*{w1C zEUkvl4k*ht^J7908Z-LBi#sQ?7|lM7S$JRkk`d4A22NYu_ZhH|iTdd(u+t)dg&l^= z64?vi2dLi*4Hhb31oADWD3P~@ZoJYb(gKE7&E&X69u0_1o_7ynsTA25sAr{scXg4y z({2dVJk`eX?G%rXt7EsU;*(+nH79AXwqQeFZHW`UkTcu zOKCsWIe$_5{b!#0C)|AhnUDSXwFsbK`KN^yYyxg3%fDi|`4polB~lXv-l0X5BUE>< z+oQN~4X^mb%Zqx_Qo3@|sqo9k%buQ5nfp&pN4Gx(sXctaxWNAxZc(Pq zzN{C)!kGmBBN34+K##Q-2tjLU*R>0bFMbiNtB7vDWoHPPJI_2vMVg>$r{5RdmRhaJ zE*#Df((F5(Ao_9$B(n)jPE8hRs6GchK)VfS-<_PLVicfQh2qAk9LfPB>3Ksc;+ar-j zm$Z6Ds3v*TEh9I<`v%JE1m339<-P6^o7Ec&cjHDLzh2M#2q+$f!b(`0Fi%K8$!gzd zfV_iFCZO;b+*(;xBR2XW(LHKs4@;SEFqe`2RTYt==@F-YDz5)W4eTHNUm}Jy*NA1H zUa#=3HJhRFv-4MN7PUp_id90C#_8Rb5){#1KO&fLG7Eq1`AZlc 
zOk%?3&btfOTvV23*MoYTsO!DDF&zXef8skDwL%x9ni9WJWht?zS~HboSq42~U%Ql4 z&bMG@#FS@6KZ7q7DG}P6AO{il#Mg9Tl4VjN+38!eEVX1@XWZC&F-Bc$Hr71;Y=1&= zf0AZ~83qux6h`*q1k?EL4vr4c*W<3ggE8fGEoN0d)kT8U9e9FZ%-j z{nFh3kD5mR=-8=0UT_EfQ6=AC{!CWHsr08!|1a6z-_2Qn|K2|%3KX{Voc?dv7dQ?z zlRLuphTf@?9QR?Q(7n*^diaoao9U`dC2Lgd>rEkvFW9j;ve6)tld4@!ksv9xV-Bea z720#w%>{2tvpJ-9X^y6~cZ=wsdR_%$>*&^=VZGzL)BgK~#I>FbyPY?7>r)>mEOj1q zDb(~HsJV2Mm;DJkzOG%K;oAwo6rsIY>1nAP)tFKxuN&PlB{4HINB4Pbh#{_GLZ>&* zQLh4lruu7rE>N<#6-wVU8~4y6}BJZGgPp1EF$0o4KkPF(o|m`84Rv)ahZ_sX3mbw)5f`;tFnzR;0F| zCX(ALRxl+xG_7X!deR-P2k1FTTLPl@DBHDCGff1COw7f`p(U@+95I-sMH-IE`HJjO z5qu9GQhtUfL)*)38ChU0N>|uW_>ZW0ouw1T3A>LQ!;&3|dU+)1H+L;xC zabUC zEJ)=8r(MQ(w(Dh({nPvD2abJ{mpDXMB1t-QcsosWm4z2mrNosDX!E^yEx%aC9KlT3 zD4gg6LeNIkPxl`?4*}Pj4|(#|+s153ARk|}bbDO#;>XXLNyAck4>+GDL@g$>MKwRNmF<%-LkeVh6HFQZ|{%N zBkUtg)~1GBF*vqzW4VzNBcV#?O{VhX)00I4ma zK5p-n_c?pU>$-{^7W$ca4jqzP=u+){f@{WGhqOen_8Z`_5ABUfSzF|GfBG59t2V{` zPVc?}v=4Z>liPU{Q)Le8@OS&(`wJ|L7?LkoP0wS)9O>u0RWXgao7 zGB)qa6I#adZv9T!6CPPLS5Ap4=0w+H7uqByQP$Jpqp-tN#)85RcA z2g#uI)Lopf=8KqW;4&~{=1`pS68p<09No}#fy(=)lLopu1ZYu}~x za=lyOEP||`XOqHv7M~8lu8?2b3i|fFH{xUR`dZIJ%Pw0J22V&gA%%%rVRQY`WlmC( z88$EJy*x?pGTvLUFvn_phziPzFH2mSz4f7LmkZfjvY^5f37C^nu&us>Tq9Tv=ftx; z+fe~8t7v`A9^F-90k>9^2Jag0-p{ztcl!JmSAdSS{}t}-C6dF;92#{W_|COj0P#EB zkxd~F+lt8B71dA6N`o8Eee6cGFfli?&c+4}T9L;oWd)NI+Zj}pnnLc>1I|Sv;Oop( zUU~mx_tPu66cJ?GJ>{91odI%!-n^vWm$JY)yY?b+iEm5f2qHKJW-Wj@NYxpvKj{F# z-(DH+9?2KZk#nlF61c*3%x145$A`1iUgBt%`bQQw)m^lThwcj8s|i#d^4)vA0a@mn zC})>?=H?qY>tIpv1#Nf|@;FJ1DhebUVlndAXviU8>J^A|AnUVr-usYnn-ng(o_SQ5 z$0echIa}+C1QvNcL^)FtGU4Vs*_rsG!Y{c=-$5(tBnLAJH7w!VP>2NP{3;j+VT(w9{smXujC0A>k6Iq2`Do+*1fklCk@e^;S(sQ;>SfBChK znunPfWw3_o`uGjmJS%@$zkC8Z622ZM4HV-xXJ8w*hTVYEGisYhcDcieY(AMbV{AL_hlxa_qJ#n@;QMyL`F2$pBmgOn=%%~4dQ$a52|M*p;sDV zVN`blCX62!D&J+c_RenHGG^s-LxTi0zm}ntd%L(Qyi@I7{`eQB{s zR!ndTGui-Hs%IFEj<%@y1viup3CMh0iJowT%gk(7szV8VfWx)1RNA8DIZ9|ILfwLE zu~+sx$YJsN!tHe4$ge(nT!N2;zgF9C8<2;I(&V}DdTuy`gA%FFgFNwWfZmm}D17bE z{p9+=!z-%JdzbgiKICHc-9YgwSUAjt`XeM}PzSs*Wu*Z}`2q%){UQ+>2gS0m{>!U7j+|V%o31cq& zm^3@{A)YjW?*W=vWjqc#wULE#22WxYG2XA6RX94vKXwQi#DQUp{`&F zB{1StQQ%J*4fr%I8_Kx|;iYr;W)gzOVh=(;zv(Ha4^c7HVKqJ}eUKv|XMJgpt$&XJN#J<&TXroGtr3`;f;U?;b z*Lv(;e1PHr`}5$VQ^ z7Cp9RNm7hYr>eliko$%<#T9fF9dvddxJX)>^WA-}8`xDT!zvfxulpF&T(jb0KpgV2 zJVvY)J~WgW+hzCAqTRjtLQ{fwTTcta)2vY3-NF8g=raw7oLy4{8|iBqNwpEhUC%#p zDUX6~CLOdZ-Oe(!6EWd?noZzBPhaD8W?_{XsFdbF$WXQc=~`G58sd#@OmdFSc~IT@ z8k;i)zw~v?G^<;-Q?llB*I}-wkyRlB2ZH>O3+Y*dy66|%?gXPo6dxssJV+EnOUlDb zz4fmJiq;H4NB3-B-Y=_G?s{yut6$%A73QGtFH7&k90XV+%V$Qq%m#}z({i+rjiXOz zhZ_Sk}Jn>%Th2YQ%f6a=%A9+zm|k#0uwQY@RBOoa-HJM$=EJzOCu z$wP%@Csfh!_`Um&#_g3*r5}*Ocft2lP%~cxvX}#Vb+sLx5X(>V_!}b37{&beAGQ7l zM;XN;5VRyU_1#NQ|8a8On}v2wAJL#)8G^+Z@*L_VP<(j!sNKNiJ*IjkUuY9>OnXUe z2k?$7+5~mRrYR_KBp$#Q9eA*n{dM<7NwxO9+NXPJ`jtwGPg@^23!3ucX!XltC99Zo zn9#CMCF~nf&=$SkcphX+Y=y^m$H@s7xLcacx{O8QQ7tZqniq~5^v>mEbPx4>u5Z@%HnzMz&mkKx(5l}&Zas$+ENHMB_eS|hvt+M zyN2)Dv%CX8tRI)W(fM)y!(zvKSa@LI`3~jAEi9Pc3ENjIQM9o0;n|FzBH#%*r}N6; zMUZpEW#!v~n@mPUgRXA3?-rA$qhMD09QM=idIPBthc_3bqp1>d_D8x0I2_kiOVm`u z<{3c+=lO1=usKkbhU(=PcgW!x-ib4|Pd4&T-M^6UV8rIkk;W7vbDC%Jvm1!Xnl+w9 zVbh7LE~&21d>c%Tp_q{lPmoiHVy)79DGv0*uhbQ}0tqAH{T62}1dhG*78h@1nu&SH zEY_y)4^<`VuP}I!tnd|pss?{^Uen!SHCP_+Q1P)#D%-(^LB(cASDCDs_;<(=>PcP) zQv`_8bk1HLZEV-61mlD%U_xfq&=f_MKgFihvjS35jhlsO?07)@bo|X!; zrkSM7&Fy|1C_2ysdP zX^CtSsxd6Nu_PTPE^qoI3Bkj-ye8^A``$#EAWx6{kq2l>nVVH^#s*U zX96}ir#0S|l@-_TUcE9Jl@_~o$~}rXDsZo|v#(Lr4gEy3$ur&+E7~)V!<4RODDSY^ ziMztZg%m4`@{x>z%$Ij}@3rZDW^VyKbnl!Kt`)?FTe1N+0yeZ-5ep8@B(g+P6^gX- 
z?T(lhel{rhDk-SinjG*!OC&nzXb2XW{wlf!(KtJgl%Nk-LQ08>ImA`{Q2rCcEz;N} zp7E@*k1}mt$3mvgb=pk6uSA{)MeWcOY#oH6GmrG|IUb@A&hUBVI*7)sqj5lS2nf6R ze`&p7lTj{aN6=)yS1skAvor8Q4!WZ&qL#TN^GwGEa*JCB>cGzFS_AUXdhc=$NvWr8 zdf7(?vv%K;URkgx(HNteu~a8=6Ky=;hy~yz1|EC#FstuEfb`_qFK7*EyyhC^Rz3Ul z77#BAwroc`u8m|)uge0ssL>jf3Q@25-wDC-Gm_)Kvc2tG2jub+z|3i|JrZxTG)*Gl zHiu!!3Y9OjevKCSPwwOI8teh7bYO~>tJrc5BH^8+l5Ww$ad9E9)36DV@hhtgty(z< zZd(2dwFkV{@_X9tg+2?;5!u@q* zAAppdea^5c#l2pKX@-oCk7(LbdS@bAMHp;}B_MUWk>D_vVgz6i&@^szD1cDX0aDub zT*ie(iLa0!pLkP<3j(-t2hE~aNC)v*lU7GX$GJNU=8+~0G@w@A)k~qi`wsHK#rsSL z0UBrrDb0kf2s$5kT|f0co>)?j$!q>1Fmd}}WWIZxXrv^A$Wz~l^_pZBa^>!=^0gz> z(^8cyZQ*_k(_5SCm=!;~e$38jD>j&XgA&xpAKKR8f}pY0a_3l>+21Ztg(&LeS;uFX zU3eCI{Ha^RL-RvmPl{Ge6RNCzg&QyaQN&`}j()NXy-S+BP_7BlywbR34&LaYh8O@%whDLy0WnhzAj?|V)QQ!<0EU9eQ#l-$?I zWPRHyryUNVUQ)>}B#t(Wdsg7pp*~WC`r$G;C&gS++&jOQ+`LYc?0g+;0ZXxJ^OBF( z^K3wSwC9;mAYylKBFu-7M*+lf$}uASX)|RKyxnQho9Lhlu<59;IfwX`7TLCifNY+##ehRh98LHLDdN4Zf z5l_!>u=UBia_?P5(siucET8evdW*HKkeoIFe0TxMIY^%_wD(NXP1r=Q@RUeu*TrzV zwdbibBff)nZ!9JxiM64}wr=1mb#w%D;Wtx!KA$t5H9lrzwG290j6Ccxf+`2Ms$v8` zP3BW18offMFB6xX_d2BeM=k}caRz#_e{7^59qh$QUoYMmNhIisnb!_UZqTldkk|A* zV%GPaU?(p(#2-HOMH>{GQkk=ua&%xr+40_xhkzNMOOBbtmDQ7m4&nNj&+D0>Q?bl3 z-3WPFS;~W1<44#q--Aa^y%xIIS442_dqEZ??zd5Uo%E{ppi&ELLZB(>y5u9=p8549 zgCh1p5BF-`s#(ZUt4T!Z_6x{`eiO^n!_uYtY{1g~WoB2=;*pUxNq%eD0{urb)|v&= zr$kB5TD{?6^-zQk#r7n+!227X{4B>?>v~e)`Y-}0hPPR6r12>Ws- z<(v>|@#6%%AKLU|cu)tax-%=Bdc+~RSUpKH@sOF>cz1DC+_|mXE<~FCi5Hmo3u>cE zsR&QfNHi(3dFmh~bf!o3QaS0eyJD>a~zWNX~- zK5=Go|MGr?E(*ByJBYhX*ImbjgQ9eyocJ||?}eqEX|t|E#EY)fcoPtUSQV9G)@PId z(y~canw@&Nk9v#H%Smz9tMVyxVJqFAcey{_IO_chFRQm>*hJ2?+vRmwJ33FlOPQbP zsw$go+sTx#i;l|xPe{_0CNYP8qC&>tQR9r{mm>-enZ<*W)&pnGIE#MxIBV=2u7BzeJC2d@ou9Lg4XGdmiiqGP{##M=%wFeDpX;O3uy$i`li3S999UiNO z70gCnD!$95*x#|cr#%?N48YpiCvU;|=zWmGNI|oBbc}{BVju{s zJgO-P(>%@MePr5rs1{1j+10;3gzT{{?s5`rd4EwZr2CcO>DRMj+RA!? z!&Ey$;p0ZAEOF5r-Zn1jYTo1Gq+D5ERq-r1NvvU3?T`=6A%+N9IWg-JIcge=4Wxpt zBpy_^j3bM^-q^FT)w*VOzd3enf85Q;+9*)XbK4|ypoA%;8+SQGF;3G^gZbr)8IVex zLz~(KQcf%MC?%lKTMM5UQlNIh9lxGeusGiH&2-GHyVOds0q~%ELZxd|H~~7;r|2WqZ3hygK0!D>H1!mvlMh z@T=>BcRrW98zB3pj#iM2mdl9WjLQb^^sivcoc0o=pdT5yt4m3-{Qe*i`(Dt3$+vp5 z;4)|!NvXvN+{C{Xvf-phxPql#dN@+;E2{A%?ljNQ*e&FBeT6}zURbFXHDP*em)^Vu z+qts2q3|Gi0$@UM-{JmNv9gk@M3pO>3r%G%19RK&I0xAAZ^mgm-xxc;_JR;sAC65a zTT!o9K~CVnh!ZSpfKK#>Pqh{W)$2ItXP=A1QeF+wiQw~_!FFzg_cbgqcJtMazGu)@ zIW6wonH&>oWQ_x`_}j1Onr5h-Etfgd;a~MHYcCc2UuEjIFp7V%%2X?bWyUB7Aa_o* z(xv-1w!z_uTOQ)(Wj)+<4*D9JM>nu!HwSI#TB z=C&vme+T(0^RS5E)cM@Weu&{+BjgAE-e?7NF+yUq_|ZYA6valiobJ-aUSjvUF1r0% z(1#J&qm&F~f&RSZFUTiX)jC#lu@V%XpGr*vjxJJ1#!>8)E^Vk@1>WL{W9{Hq!Lz5& zPraO-~8y@l{M9TGH}xNB5NtcgrEXmMRF9)7mM z8#Bo^&sywcD`}=`u@}s{lU^&<0wAswx&TdK{-T;9@6*%mbdKtQyCs8O=HqJvlZS2f zPkNn4v@t~Hi7{w7?oVmHjAKMdWOUONM+Vz{;nB4g1fx&6i~JI8#b)W*9qHlRAK#>& z>pHCc6D-7|pDKl4+k=;2+=!s*-#{;*MJN`p1RE0$%$_!n=ZsSbY1!4(XRh8#Cs$6h9fwMu7FVQdxZVy(rSuo9$QtrkYS;@^V;Yl zTs?dGT%0xo$aRRQ3h~_$1J^ih8pg1ewxzVP28y;5N;^Q`?2YhNjsyR;SH}tAroJZ#(avH>sx6gD=4+~oCJ^I2%5p9 zXBP&$eVICig9u#*UE)rZ96K`gne{FrTZ3s+8Gc)AL83>v`Fd?hP4&Bqs_J((kL|L< zO=fI(?>#|3f6AhN)iW#GWJyuZ#QwVuUVw(b){2j_6O|c{ z4jaE~l0~#Bu`3L>`e51rLhCG(dq|8P27i|r~XP8iTK_W{k4r?lh%0KQx|O^>{~b9_l-a1C3q z<}B$nS6d?KR3sWE*JOM*;;?L$pzCs7QZ5)TN&|KSMkHhgFCyDE=&i4kA14?GH2%zJ11z9 zD6!4l4UXiWej*QJSag}Kx{Gcaz2JytZn`qRbZnyZ3A6v^-L3PYPZ>nMequ2$rl^vF z@FM}lUV`M560toD%N9KPWNygd2ESjfrE}Wz!=y}aF}v+67x(UUSXxMnA@Chbl272{ z%5Og&a@%0&koWPowJ_pV6P>FzVNHX$nwVvw)7)!oE2yU_6Ww6T_2tN?2FkjApc24y z17M~;pPXN#6zkXOczwRbB<-60dQE>1A_ zMj|Dyjl1d#pvTn8*Ah4vs(cO3nHw=S1d&BQjRyqlB7{JP+E7q>8{k! 
zyAPgw?m=W7nTjR%D$eGCHsNJjO@3wsSi>0N#LSA~SkcJ(z!k}nD=X<&G7xW>!)^qO z*Ikunq}cbi$Srv$?R~j7UnD`Pf2^%UFwVV_b?#ZOrJY;NM2D9IaFbJ~IEg%=kIX_o zx#lbo&8}$ocE`R+aqT@`??Y4qVw*RF+(h~0LJ)H9z3g{>LEL_8eV}P8O~e@M~;?9a4j~Z8e#ChJr)WhSU8U}&2%M`T>bA_q3|rNH`u&AO$5P4Af}xa&&qo$GsiikBpm|S=W{YxKsC5G3L%ECgEZm zw3FD3O~&jM2Nf@|z$C88rX}XKDYyD=1yaRl{G=OToMpW%-svb_-8_??BUx|sa}PER zv^u`euVp>V6_DaX4 z%lWgaQ(+E~I;ay>@Zbbm(vR8Ci#;IQY#ukpVMi`rurvu>mN*=SD0yAfZN(yn_H}9x z*?3jQI$yGo@XT(n4EUoZ_t|A39vhi}1+9|_>jzU9b~WwGuTxEE7zOb&c(k6%a`%~+ z4Uu5`M)p;VjEHa*_eQYQG_VpKme&E&0_eF{?$+x2r-3n za&mI~-5!*WyCc+_T_z4>o&s8cKe}PsRFFrNOyrj8eF-Sd_CeU_QkMiMI&Ia^VtN02 z1FBg%v75mpF}kYdla8+8D?1TAl;qtbR1Qj`hr(PqUGPB#e^g6pBH2|yHPMQyFM5Kn zceiEq^#x#Z7^S#pC0C0@Aw|92!1#)R+FSBBuy2|5zZJDcu13{iUM_AJC8C;Z5i)?2 zsTpC5egs%wkhp#r;f9-A1dY|t<@Q?w-Ewvkv7N^45;;vV22%Ex8$XV0wgL?ARo`gt zFVn;|2rH`pHU(%I1V`bhfJVme2mTd4TGoXkh6{GW$C*GS5thcYH@l~aFWx@{bv-_MMOHT4r5-B`Tb1Y|Ilz*VGk}##C zqp_%lE;Nfr@0_0I!4ZoS0a=HPrO3CBl_od&CAx6!63Q*7w8#U|JYT=1h4+(W_m`O}-kdCP_wB9vB z5mDqbmB8}g!yrMcZTgUO3mP&(-SK-G+10-RbvrV`9@UJVI@inBhOeY)+1k3~j5j+t zFnNyjT;IhMCcyB{eTUCd+PUazy3^fEzD9^{3`1cN~CTnm_gSk2FH`4^%_O zO@lw6jE}}6JEzwjV0!^A=WG4AjS&7#(|?*k@N1$$S`k1U6kelOC1C)x?o+=Cnj8G{ z=|AF@>!(&H0Pno?k3!@Bq1XK@D?j~(A(Q_1=>u7Xs1ANW7NhR-cTk_M#T(O6Y5*1x z^O@48G4-+Zz=+pewq>S6a9fAJg9ej_{j}g`P>nLU&+SOyU6VTBL3&=dHiiBnyURc2 zOq~7Kcij1}`W^f%k^7hJFP(0?U$?*q&BFgzcq{yku+6OcA6;ehH@|3){ki1>cNapB zl8o;Ss%IXqg%f%-cfJpOS|V}#)Hn^qHDR1@m#5lCON}~!cIh|@pt#PDK#!tkE<^BI zR7DI>>)TO4he0SDEEL1X_NeaA_q!1D8*mYT0BT1Cf=Pxk1t@w196|*{e*gCJ9BPL$ z+-fOeTa_F_sUk@LGNI9rXW5W8B(DYw8{U;Ocgu>SUDPC$&!>2PzCL(Cr!AV|kOmZc1}T{DAkAVDBcOYC z6li&?m%+YX_)lCLiO(pC6D%B1rE`EBf)S5{i5YYOeFBQc-`)bI?hB%xA`g8By=0MC z_H)%hL<7={`6LA&moZxcdw=JIyrk>0OeYz8pN7FbSUW)PPk{_cKjPhFlQtZ1hPLO= ztMn9C;0A5mJazbb%>~bj3UV|u9f+Eh4xYU2;64mQ`r_aGYJSFS{>e(|8k;>pH-bb= z|6V%{NGpG@uLeTC`EeFo$NpXq`p<9mpO5A5Y`Oke=J~=OO?ToOJU3ha8?+OCBir)t zC)2?49zwi+%dCfUW-9sIH%Dot$b{{7?4l^ydA^96IyI3x8BnJ%dg{T<^+xHeaM?GP!0#X@1`=m8(IJJOqBGlLdnNg$ zRc(1$#i>?3Ue<0E{sH+f@{_k)s!px?iD>vZ=aXDo85@y;qw+2xDl40-Id4uxPocde z6-QMbiN1As3AJI?>~Dazs2Js?>~B|)jf|Pql{uCEoUMgnB=g=4^J#zN@dqeQc@v=2 zH(B;6N-6OOn~0{tR$qtJQ_$nRH|3_e2Mp|5$0Gdz*l^}qY%-m1ycwXcHeN!Wpd~2; ztxT~DtQH}P(JD9GMej}w2c9nS17Xu8=6V4P(fZRh;qRa^*dS`-o&5%Y2bx91_5xxe?3$Ge+^bM1@iSj6A-=5R0>Ei{tVSOsQtxH z^H-8~&wq0&zq%X@>xnA>j2*ad7;(#)^T3QMVAQwyt^E%MXKY}CddtvmATY1%rS*RY z6~J?`L4cRKbd;qO5wkptUy;+MXb=E@ol8=U7B`y~M^0Ivym;k?NXTrqE8JW2;iCSv z(Lj1Losq=StUuP?JOv>^6BW?~6Fm_qAkJSGMu-XC0S^17;nX7X;$nW0nl-p_EtcEaW(f z!wlwtcWmiFFvo&Q+#>)Gl6rgSMxETPin{u1T!Gq5NYe`C z?YFLN=L+pAvv{%xzJvV5EmwR6A?>+T1ENtA^k6><4<$XZ++REF3~#kmnINxRZ95rO zdX?{b=?%WOvO!;19?XJvc$(pi8gCG=Q34Uy)_cgi=F+g0#^9#s&{15?ppct+R)G-Ac7Rx*l*Ys>dvZamsk|0S-Jz>J7CHAH8mIn#6NG_?_Z%;5Ery&1L_w59eBa zV9RsFS(9?gr&*Nnr5Q7H@D%$E}A#JB4Rc~4qgGAaKodwOWgxt2M36k0V0G|3< z-;Pa0+3-?riI%%JDYk{ciHHrpg4frUlJ4W4kNcD*X~vp#I5k!XI(`23Eb3l;dO@-7 z`}{<$;z#uYz#MO$od$;@VBbN{L%p}?o%C0L=IJw|K96s6K#_U-iWeHSj9I|#m?-$3 z1OGavUHQM5#`R}E?0;zde{oE^?TIBwi8aro-`FSk+ABAeeQCPPyTjIEn?}u#7>3zF zo>YrQ3#XATHJ0DZm9)aZOSKwAAiiKzDKqjGEoI~KCwCIOd0Ck-Zd9pe zRchZ}=Qau_8t9L$I0K9~O52=yrHI6l#7v9s58h1yUjns7!G$0{T@6GDgvuq+IG!o1 z7(m=2goX>bU0j#~SKn#yP!oO)=Yxr zb3GtGIiK$!NCJFKX+1xFJ$`hBkF<7U=(q)pY%}E4RBq)2_(5I|5%6KLq4Jj~wP45F zjYEzM(%C5@m+l#%$T<;~Gcz&XF(=w&RZ~Lzp_%FR=26Ho{tT5;9LvV3vGc4>n*xc~ z=0rJ8F~z7v@n_@qqa2(}1>p!E!1j2zdY;Z1f!sg$vhg!RyU>Tn{Ggrsh;69Ln9giM zmD{@|T$ydSsp8Fd8W|MdUj2@95DCqP>?{^lOmj7f z3mmf$_pUqVI<|l_b1>csjVfh3@zSNNN3kQ1A-HwYbtc9C^Jq6v$7T9J(en8s+zcpnAZKVV1YBM~S|_aaY;ST`i9|axnXD5H0Nu+yeVWa+zi36yuPG z%eWlgGL*m5j|6FYD6H!vS3 
zW^RY`pTIUj_6pTpt!s;9THPloG$0{Jhi*;)jn{otHaz+E0F@6a3A0HacCLa8kI}6;VzC~Ff$!SKOU84 z{Y;M--DlJ=&l;dgT8M>TYt1x1FC!FRYejqU9Cfw)4c8r3usvw|KH#11U>Jg029He; zK4iHZuLbm3e)Ws?BvUh4xaazwQK%oYj zEU~zFiV^UwtT88Pg+n3qUSD(+RgNd9R!6BNT6!f(QY8?cRb}$o@c1JJGdE@=OBc?r zlMkD}G>4w}Mpvh^pvJU;DSAT@o3_-@jXan9(GLJnbAL|0pmBy>E=K$(VcRboxNsz7 z1bWN?xxcK|mLweQ#P;b+{^bVIo580Vt0A@GnNv;GT0pk&E8)go4EQ_f%dsi-@1S?6 zY=Bs24LFM-wuwNA>88-^^sd;B3q3*CGKP^dEs}z3@G}4s?g3%%U!@%X5t;r+)qJd% zrlk6)XNt>93Kn**c5bn44qU2PsEq2{TgKgiYMLR6(b!WUK<1qZq27WJE7kUY2VKem zvTULq3xJ3{FXZ=c`7(!j=-EERO>2I|iX8hksJZbcEg5*xfa`gIpEJy#x7Ytj{Ed5W z&5wphIeiv7z`Op-o}s^~2K-YsGy{_ng~@JiKkbH8mK!EK$TNMVo1Z)i?MAvr+#^|A_Xx}yoKszTopLr_1-H7c0JmB3I5O*>Gii^ z#2e_)(Q8W?6jg#&nCEL-U1BChgi+KdSjC#%XO5I>MxH0J=e?z%$c~hnT#?vt>C&)5 z9sxJKHHG$UejTcHTs6 zThreIB=M6U3U{wl%9{5GJ*qoC(EX^PeM%&zc5^P`F|rBkCsLkI8YN6+ResJR?Kv+C z0?`*wECAG%#UrsIyWRPl=iYcFeSMX4@qW#al+!(4=C3brXzHy8_>$PPhGekIM>-e9;z0b-&O32}*lP(NP=%hp`VG^eDhDO(>KTZwk>X4W z^=Rklr*7;QZG+AW^W~!&^>Mc9O5KMjQ%eT14DUbAd#%0pWW9U9ecAPv8U29Hj6|zJ zDusm*8gbfh0O#5p*{|{i&I7F5_1rAm@0tt7w-Gp_)B#{u~u3S~avL7$-C2 z)2OfaqFrIq4pt0nu;31;plINEc5$T5EDhJeoSlYg8gg^x(RCJ%Gz2@v74JQwE`SHx z&!u6*LBYlSPN@t5qJpQ{-L$C=@5;+U-ZO_x@y-MzmtAvj3C{VSHvNlw{GMO*M{oVA zqW-6HoZ#oUAFEd>uph@5f2nB2c|iE6nj~Dy1Z=|n0??l4%UR@6Oc5S3(kpYaeIK*a zpN*y+zi6~?sg&&n%=?>V4@q@)HEl+ka*?WFg%2vCUMF1^7Znz$sM;go^d_62ZQIVF z>E%zEbFs%mPdFq9gK0JO!uGzOCVeemdEtdL-wSkQyyJOsEyloSMyPiEG1@er#MzPr zZ^lg=1e|;}Yd$_}03foZ<5ES{1Dv)lZr`*deI6bNI3BgooUZwXj?h>Aar0-?s1DvdCR%M_VBgfFcWDoMF1&Do2u+k;}aTgjJt zA?!E4E#q@z&xIzRS0;5AXX^})2`JJg+TnGa;;LIE(3g}8r1$y}NTE^ceMK~oaIX1F zC20qiglf9yxpdEMR>U3(yl(@{BW(9Qj59>{&FIO?fC7g!{&>^ zw~O6va`t{zbqe;Jl`bv6n%Aby*W5?dz#?pD@kmjGbWNbDbOe%bU{(I$r!x}=$lZ-6EpEO8hN)GrES!cH)>n<>lgrfC2)LCC*#P$d=%Anv*_qyNkC2D+!Q1Ve zM@~=qXdkl^c`(~%5`Mi#Pi2$vwh$1*(HHb~CwM)P2ZzB}QFxAiH4{}6yXq*F0xvAv z$iNaSLB_v@)wRv-!cjq27@wM02IZhOEgk{G?^a2ds>50p9~xU+#V0+`C>{90>R*Qk^659)(f#v~@9SdN!UpJnA{{MfnS$^^1{+~){wx~cP7cCowm!cGt z46X*XtE^^W7y137%Zpe?s_(~_H8%^MZPM#QUTiIX7CZQw23!!2`uKI1<%8crqU@+$ zzOr80IbBOnm7CELtrG_65qhS{fO7-70lOng2l{4>=5U~8_S?snJsJQxuoUx6t|s17 z&6dvFK4U~Xi~{o9nIdVR)AQTMSet?#gI3d%p%35Mn|{FL=yiUTH1cw7Y1I~a$abgXp6MR(|oXTrLZ z^K3j(CHYOuY?4bb!Ya*sR@YCfxwTKfhQi(Bt*h!dyd48M=1s^8dr$ zdq*|drt70YL<9t+cY=bV(nNX<7Mh5NNRbv5kR~9~YXAk577+!d1u243A~iG{b_V1j1_HXvuf3V0}#w70(-sidR>%Pi$saK0(cO864 z-5Mbg4OhxbGJ^5qs_&PRfObU87()95-0I(T?UBWrO}TNuA3DD==KfE@NB?wB{L{Zb z2-bXh=W>1zo1N!cs2dg9T{nEHspGm8+;lH))~^?~nuB_0E+ z%#K~EV)&^DE#5eSu?=bS00B@cUC5QVF6Ej5l1++tk3wgvkobf*wvGisKD)8HTq&(= zuUtDc@6hHisD!UMZ>TEIcH|8G@&~DTE7(k~{js6bn^khL2<}FGIet5p7C!b7%|{n| z=(HfEYA_EIzU8ZkF-Rq}UE^I?PFRCUQrjVej?iI#{*5FJpf)ecMcY8CEZ68i+*yKE zo;ql}uB{J5lsT|5Niw`LI?vw`2-qW+!`x-uAlq_J!jF9Ck7*tXHIX3C(Is=+ztMuV z{0`4pZvqnSFf;KrFq5K$uh-sPnEzt%F_C4a6X(`YvP2wOYEedpl9}6sIysL{4yT3 zid>}I%65++2CRJ}uo$CU-jPdj!>KA3XBnDb6MKc|*6fbNQ~0Sz3AE{o&=UloSBoRT znko`LnLo@OGI!Vd>uunVrw$Pg5*&;O`mu`N%Xq@O^2ggFY$RBVR8PN#pU9vw8xvT3 zO4H)Ys$(ukDSkS+KC|5uZnb|5{`Hmza04ABn9mlwOdTP=LcSFXO4a75AKwjdmd>kz zsPFqdSi@63>S3-NDllhh;ujr^39U!epfMlhjfF@7%fj zIc-b`AfZMe_!SZ$NQiBvuTz8_)m0U~1G>MEUH4fmTeVOhh@^KLL}mGqN{Hqi_y*c3 zL7p=#3hIQrNcp_^?|rL;3cq;kp4C9B49qjbW&G9!#ne+k)OoD zM(?M1*zaj5hNrib+@}QtpAugUX_^OL?0_v~yq(M3*>_0aPX}szbT`mOGha%sRrCLd1$JoWMMF(e+pw>$=z9Pz z9dT`2Dlolx8)4W7RfV-=ACy2AE6I|;*9fo@XMsC2T8m-@>vJZU&^W*S1htPK4iGk< zr^lOi<@f^GTf-scNxf}Ywk#x8k9O~J(YVBN=`ub2TXr!ApIS%u5L$iGM6LCgtQ%cp(o>p5)l}h<;ndpiDECbz) z-8wWTA^5ZiHBI3=`HS)l!XmP$UCeZ3sY*yMa6S1Q@V$x-sykQk#GzisPZ0_mzoEHOMj$$>3Fv8rA9EP^USjKl+L}^143lZtBHiiTbYM$g`2li`N$}ou^j0R(WDK`9LUDbvWPuDewDoE)g$bG-%?go^q^p 
zDR&x_SLrr;0z5veT4*Nb1I{Y+)vOA72d}9@LB(AR=wvTktz*Lryy(rA6Wyt#4j-s` zAa;F(Qmhz zuvXe3PgBDhUxc8xOsGL0%pDF`!$VrvE?WQ&Rf7^A2WE*P)m7G5emv{L%Od6c&!lRg zuXn8X3tQ4t3rhmMOwhx=7KHU;&5P^4h4o2uShUG!C-{8$1X6gU7c5XM<;6zBg6Vhc zZaRJ(@bt}sniw_2jPYRwc!pR+=}uf%n5KWvvt12Dj=go~`Ib+#v$?9GugE4M92_Ie zv0YMZ^Mcn7VqvR&*3B50be#BT`E8nDDz~Qy18DanJ#T)33WePE_0XOGgX0;jE05es zL;C`~+PK9aEFOI83XM0+44~|N<^nnxB#>OO&0+cBugiL?%0|?nOdF}P(}#sK*T7c& z1=C^2;N$v(g8N%vPXCAXzF(7#K?BHf?|wn~UC}I0t2GwR7d8nGe`B0|2xCfdXi;KVCGof9|cvInEpwELw_n^GlGxVqKZ17x~O?6T@aZDxu6zW{&P4D|gy6eP$xeXz>I;a|HwfR5U*$rle>KI%FpE7l~bpO8*jTi-dX@(2)d?D znqcBNhTwucuyYfDRnm@;xmq7(zV4}EOGuhTyFtvv`}I}qqG(K7557B+tmxvMnjI9(j-OaAmB zmCg>@Cf`=tB@w>PAjTQyv!Mev={k3*F5s4EiT5o}!0^W^?^92qvu9Dkn}RHEAVncF z`_)z$VB=kkvrWwvaWEyKp4L&YhZihn87Nix>TbIz0cEUE9iyrzR!3ZXvf%@6hXd=8}c6tKS?$-zU5Kkxl`;?lj+Rplk`w3aTwSM zBh-a6WW!|xZ(e?mSqzS|P0|Z)uXUIvtXL>wMjK`<;@V4S+=150Vo~1;#WOkCEf~LB zx)z3VkOT71$)rVUD!Cm${!Zrf#Bp|e{~L_YreV$3AkOXQ6rCoOQ}^yWl7A z*{gC55?O)*Zi|=XL+h&GO9XJ+2KTc#z*NgI&_Zv%PWba8Ti{Z5F1oP%`}Ug5v&w-r z5B8GdA42>^cY-6djp3vp#OviJ>=;ST^`cx0 z@2lSyTY^-S?!0|4Yi!d#d?Rwp;gpqBFhOlyYDN7F{k%Alh14`G0iYFRV*vNkdG`Fd z5|qPGBhOdhe3TY>Z9*{JLz2`rq&ei_gAtuM6UgPd?#=$@0wx~7iD1x8a!fqmmnJ|Gzaw@U3!m#)yXiQ*qy zfntVSwycAfc&;NZ_TrJ$G7wSvgB8HI3n*$*KHO{ z-ppCnp%#Cv7+CWi4b3oe@wmG(-^#DM$ary;ao*w|1Y7-A2Lil;%@q8xv8(y!2$j2m zW*p^3;rE3d$i$%wfCvY8AZ7ymCF0btTjhTelRxxp=)rq;=;jd&uw4g1wz(|bQmX+F z#vLHe?WO>jWGC8XxwP(6+!|x&y2$iK_GyRGUW2TrXZ$ z|EF{_|GMSXY1q-l0By+{q6Ahq0WDUIayL-YM_u;emNs-5zUIknDCwd)H(bqaR~lix z0JN5|;k^B!XM=F@a%`^t*}{#tUo05L^e9%paXNfeR80BfLxTrNKHq#5%^Gazj7<6z z4 zHm=X}j$ONIA|KRqT7H7V4M+b;eBA%?JPN~ zkVFN`fnlj{LT!RRK>_vV*rKzFa)XbKSibGNM<)obTu&oHT4}6x{n}@j)h!x|T}M1T z97>noChtfonQty+UpSv4{QOIufWQXSK1l!NcsO?I)bOdzZP-}v?5pwfr%yM0jQS>& zdF;(8p^NyUC2X!27K%Th!75=+Ylf;0S!V6LsrV=m+c2+Y(DRhALu10H z>p}L@h#_EX*JVryA=M8n3t_N~t?G0W4Qcv>ms}C@J;#=PdcxG%vd*%v#uF>eYm;{{ z*w%KRPH`#jeqSj&%>+MC-g6CKdbCgaJ~j?SrrbD)kf`sQAg7R`$ls8!VL|g?&XOA_ zE#}vm+nsvU2D^UPOgiGIEbs8j?FM4nGuzHIJIl7+D~7e}YVs9e(+>6hE$GdaKh8!4 z?O}UqOuZEKu?3n;^`|Kx1Zp{81Pb49_6*w}ffj~$LJn;g|jIi>@)X5elDqKvnmC5-QSHR$49_oKpaSsvT) zOJ#lkuOtos=LC%ZOg{R*Jr@ilb^ODbZt*0?^AVW_nRz|E>40H{tpj)b@B9A0FV6dufbTDQ z|GzTsdM!BkUz#-}_=kNm^fqMDs~|LkatH{Tq({&?JbVBZAeZHv*D!|QEJbbc#pN@( zXx|OZZc-fiTOpN|Y>5nv@h~!~8g?kJ4HNA(%hZ!A>p!`~EIVWSg7Hh^mSs2_)s2Aa zkYgLSUCb8m^UJy^r7AhNrdZsTIo5S@!Tm$|*)2x;fL)Vw^tCYCme+D>t%L<$H{ z0Qg{sgj6~}Q!jo9^dbeNQ}l=Ad^392615nH5swH))dkIVix-Dm6yTNzSp<@C#PNlvKhR2Q@6YL_jvuHQsM*I$x8`gYY6I|@xZ|LpE zbC7umm$Z<|@0qPAIT)>jri8h>DdN_|&uqT{AWVBbpOu99M8(w{LKyfque2{ zV5ff!n=$?*w)YLZThLS7N3_Z3Rw$6H&;pF6wyOq1Es;w!(TZehe#VVjtH+pKOUWPF zEoeJ3WEM`gnXbNt1qAkCf)s{{;MJ|~q^8W32w|c?{$1gwEGxu4Gv#xnN z>%|?8#T+j?N%m%MyRCVuaP;<)asal9_r_A633Io^a^+dJO@;Fl>TGO*ZDDpkk5Cm2 z;{++JtIrDb2zjLybG+)xu%<%k=nB?G{-R%DPo;jX-_2VD{f9w3U)^C<`oITfT~fBr zE+o>oruy?WW!OAZI#bg_&wWbU>-CXiZ0dLREz_yT2;jsjXi~Nc@qXcq1|*gyG_u`b zo?=`*e4_ET+-v}=s#S!!jLcHs6;D*vsPfxGX z;ea!zoQ|q2xJ8aVR7e>dtiOd1Qpv3uUbfWCdZAt0zhk83Wdg|1%5doN+KUn@%Kc2@ z$tEXW#U2KmTDULBjUI7@;4&2{l6U%C>ucUS9XTS;X@8a5K@-FVS_L8V+_UqwGAA?y zb+>fSBQr2FxzJ4)nmkY)W+?!7jq=cT;vY=*KIBoY_TQS|oQKrG)JUM%*xv=Ryt+n=b<${;wkD|M6L0{{p4G5bat34tdaq*jftGn1S+Y-rG6$-(Psts``I&H$#oG|asCV&R47MwL z!I;=a!VJcYv zfCH8YBkKYb3{gNZ+G_wz5afuz?a}~W7(awNut05QWeK z=gw3c5tBi_I&@EZ$_ygi(d>6!=p`E zYYVApjoQ77E4F-8mF#4ojfdi)G!En6+_V8OX{#Ty7*7S-vzD_M@{V=h(}bW~zNdOJ z0&1tp4ls^Z+*l}+D-qFV0b|aW0aL~M#%vDCWir&NZTeN7?sX{Rg*`fa7Qq0S;r=Aw ztV*+%1}-%taEh-_aY_1H@kK@~^Qk(9TQN=(HVs@a3?(H-=z;BRAs^PqM+QoTCuT6> zG6dJS4c|Wr1Oo&sOWkqGutkjAR zd}2c9CrCQNVP0bXQ7|!y{qNR(5Y`&B1m4!A0Zk~5dzu>jPO)6RIE7oU1lhmVXaAPCJ&Kn~Fa$H|viEsXE0 
zmdVO$k4YTj3>}e6e4FvkdnA1mPWI9gr#;1VV5ld7xe*)~p8n0Mr0E|Yux*ISQX1Sr z03r4`vdD}DVNkFny+!$L0F3lT?09Cc!c|L``;>le$=29r^=mg7q(jGU=b1)4Quy3Fp_mu*;e&4N&S`V8*?#9q!N1Y!C098VmEN> zFY!G8TrK^-h8MrcLH`xW`u{q;BG0!_12P;p{RGW2qyYpjU_!R9jh*Is*}8fm>XPp@TVO7-v`QWF5p41O_%x2-B> zjP3)Y$q<^5Q5$006TuB5@b&^HH89kR=_g7a2o<)2sSE(FH5UhWH`z;@(!TAl{WceE z|M&k6!BsUH0xl3sME##2I$)H&ckYA_jr?;RWHv_#8}oJs*WYloGd zT447)N-s7Ls%TahA)ggwDzmLJUrL8K{Kn|}oA*b;D0kh13liI3ZLE$FFRb^>D#y78 zm6T*j=sM3L+6RGV=;u;as)~!vY&C!Q-TT5=scs2q`>mpW*$@AWKUV-G zkmS^9+Nbfo;p$&X$ahEn*Cp^b&f}kdN+v^SnwGTs!mZMCf3bLS_g1+2o3FyJ&-_1l zOg|6($M91qFc0{L(dao$F2Ce$7Y6-BAbNd^29d^0dCgn?u|2v&Fx5QBVIe^vd>(FY=GX0)Mu@FdeC$gtY9-(2yS{^&Q?> zHSMaXTv&Kunj`(ydlclpPz3WhIOnc_lxsD4$`#1;>^!+ z`L6b$=LPEN?LuF!Y5+VX^Quo*r>zHMlvq?QVC`B`T2`Z3#LqZsSRGOokD@D9dIOkl z+@SmOPmtv^^m#Wy_BEjyOe|m%TCs#(hX=hk4xVpZ%;u#t*U@+wGI#%z#RpUG>iuVm z8+U$p!Duf8@W9L3r{Z0P>B45hWY-rZ54ooPFla>;A2zq$XWl>g_w?^FDh5y?G6VbH z?-m&UD;L-Or-^od7HR$}-2A^-F8)i+^Z$bd#vEEERO|69rd5uMto~!Z1UJ0BSCaTe z7SG`)rmZnEgGWm($g$AAiz*+y_Vo~NfeD84c?10~4Z%6ujGt!v3CbVs;>3w7v>jPQ zn|AfUMn&A0pwG+#E2f+FtFjp%U#}zH>|jk$ue>nc;3@Q&XF(&tp=Q-NVWgL)4!Bi8 zW+ht>pv?|5r$0H(irnuv);2_ztx5EkevPmC;xuI>cFbT;;gvA+eBOqBs-f!ir}Qnw z_j&ab*n>it3qWxy(H3>J7bV(f;L7pt&48TMxGTxv!S)XP>kHrM%~Ypz$FnbVuh{$K zhuZhk1VU}eK2A83#9b(Nl^mB_>y@GHEazLO#@eXcX~%6_x&&W|-dACZjgYT($R~EL z_Rd0%Q6w(A`27f3!73wo9$G^nu zKnfqylsS58y}Iv%E7af_gSw*U#TQXMUBxXo1{d~XYtD&KLI>8KG6B?UJ+Na5=HcSO<^*6_o z?pXi}cqS_T;y`*^O{vy_lQC=P#Ivs<*D|gf-V2h<5w>7nL~9;ohO3h|+SFAZ%QeDg zWrP?4%vo*YHgvorb!%RQqp5uMjA5+rR0{Mgxz2o!rGGP3HZ zA5(7jjqj^3Bs1=wdxGAPN(Ir4q}y*OdU+aagi;Um`*zjXpF&w^QdAMLLl9Bu8o3oo zhPPuG@|Nq%UX>ip@7uVmE9cGq@Gz_3hmCUqTmZM6NvsrsWyC|`l)TuepkbPb+w`bH z;DBxQQ~iK9Vp5t$PuF=K+L>lNKiXwoN@VE-3!*Pjtf;34HKk_l?hRE<+Sm?y z0Zi$NoamS_EU%^Wb9G*Gp^J3zhYFQMhnet72M>zMVz|J-!Z0wAWgw@Rq}-@Cel~A& z>A2=9lQ1V!Q`f>LTaQ4Bjr~T=@aER&8~ffg2gc4}ZS8|hniy4qNKZ8xY4moQg&o(; zaksXwI*~M%?lR|ns>-zE@QKixmiebMjtL{iM4z^71#$-EvK19fKs;M-6e7WnlLy-UKT9 zcriG9I4=Ms8WI7~%TYjgfUknJc2H#iz%K>QPUBbu06+J|gS7+vUf(B7BTRh@QdK}L zG#MK4SzspzPDf)FUxeNTa6l~#c@0b;Vpw2(5@di&^CSdD#O^~vV1YDWFRu-74}9$` z>`a(lfc8Uxl8{KYp*@rA0FXg|NuLCu7VJU<)B&eS0)(s&ke5V|dAq1x_<|mZMx-v% z`qMUJ|Fz$6#EgI%$^f zB`Jf!%tHf3v?{vrxCNy439{w&#O#%}dfke%SZkZbmq4JSUkS?wmJ1h?+8>X38qQa9 zKCz$4FObir<54*5DrlQo>T}@%63u8`dplf~d`?+hOu4~hfJQ0vr;zp!va5cWr_Eww z=VUU&vhTn4UN*fBDFuZb@NAh+Q@I030aYFt!pGqSX>K7IQQDlmc8n@NxD_MfuD(lS zzD>9={Y^O?`P9!`_p>|Xm>a)C!kQ4yKD-tX{_H66Y(52;Oz^-%OUb?)^weuZZ(i0> zz|p5PEEBlu(jI4z(aeP*gd5;B1JiBz*cSf&+u9TaSx?*?%f@v0`XqTt2YlNUDuG-p zC$itrS9e}T5ta2^HEUzua$DT3qnByW&|~xYyt{D~Qw{B;bmjAcE9`5CVqm3(_y=pR zQx*jkva_C1&m00S@d2{XU_Y7Yl(iMq?I#c; zFZ_Vt^C5Spu(KpTGRJ{9y%rxPEagtX;A&o0+=;=AEWEx_+cUhmpZ!=W8Q})n?}*#G zu6IzaBo!p6s?Fjm+sO>~dEanazL(>?+QL?W*HPLSU(XK{QL4-U_Zemx-;BCl(MQ(_ z^5Y{^9qr;=v^9#XPcer|8-)>9TJ~W=StX%@70*W(Rj$^FQp0}3cYF`t%5KU*T*vwx zB`~9!C)}BY=MKwAIiPHb>7f18xIJ%(lU%%t;mKui)lX2O#57P5yj`lXn30u-*FFs1 zJbV8jPHCkZuDk+_<&p9%Rfh*lbLlgI@i4vCBK?v(VCD({M?gfVjWpl;L+Bf@QK{=T zpET`59mv(8bSF^c3dafmojp(|&Jq9_qWr1KgF!=$xoHgkzn?(X1vigi^W65^?4dp1UcVErS zgOJx+cf(S_=FN4cUSdl6qT`9@0dZ%u zDa1MTxvs-(sgXri=7-BpIa8IjpchSwmlN2L)ts>AtKt0JLWYHb&a=GLqS}Vino=6l zi3dY6s7zvy)birpq$#B&2`jvPTm%Th90m$401hNjP2(1%Qu{CNt+o9bjYNS zQSDLhmt3pHOW}?K^W^)`xJr$(O*zH6YC7-b1!Q?MvbZRR{S{CNy7eaP=K|ya49SXU zC3pJsH&UIG$F#tmIS>AgfvJ2z6-1<5vu(^aS?%Mg@N5Y~c323AK3R3>>_@-LjnF=O zP+Se;UYTb9je5KxxX3%jPt?S4`eJHaEdrv}&lqIu8dpT99MMS}NE}^*T3PmjQ47=% zD$oN}qyRwLq6hTyNaSm(x5h^bfXhgAsYk`x3Oftm1P}wB9snUbPSyGlxL`r>E6Yy_ zOuJeWw@zvjkBK=eUmnjW0~)*Ukzsd&W{LsapVt-ixy`fiJNLGRK_7i zQ{Au=3(>!lfBy9N7nJSSV2=N4yV-v*s70)!`q9P*k+8W)02p>q2I}2HrsA((AyJ>M 
zr-M1R{en!|*|9dw(=4Hv`C>iO%tsoHBu>sP(!c|A=-RlG4<(+9wcR02=!IAoTXtou zqKF%-3ltr4?`(L+%s3oOmTI>kJ-%Ugs@u)_ZqzCGTs#@CVRSqXBCp1eWi4NlmYApG>|itD}m~6x@*d8qP%x zB!>`vTJ}-#no>oV75!baEYwbHdalJ4pTGTb>$@_Wf$9TMHcg{upuNo?c3({Ctaol- zW5l&5l9qhIOvcX6zDVX5;vrp*Z8=|#%ogc=Zv}CfuiEC*z5rw(Ty(EvJ;DwEZEa~I zf;9yc)j+QHbhczCA3R3DO@2|Cmy3_WZ8_zmD_-~6;rJOV(8Uw6TJ8FzND6V@l-o3;4{f#h#I2GuYA>C>Q>xXu80I`lnfnrTy4r+2cQ$|{Zs zZO1MoHF^^|XT_@PiC#~fB1bGMo?aGx?|gnz_=kLpGP9!`So{3ZlNoO%GV1Fm{rIZ% z8%&`>p-e={P9)Z0<`ews8tvrL6k{r1ZIni_ohVCPwb~1d6Ynf_o}Wy7U(FNGAaO7& zRYHKQ*}b=+b?i>_gqjsz4ww}Y`Zln>CPSB`kug}y`&qT;`=+{QhfbF`J?PfQ&G-t} zhEbj?J6c<_k_lWm0N2CXBpZ3QqH{_WdEU`B9s*?%$>d*orOEX3U32~n04n(#=xTWoo+iGQAn&PsNpej@} zUiw~=Z(GBlJYCWwoykngJ`4yp7agh&EM9=gPf)+h()G-{EQJ9N6EX|jMdts2KZDG7 z|FJHc#7^j~gj%0Bv#$#lg{IGFT zZ}Z|($GOwwJ<8QdX{0MfX}l!|$4lwp{n8ov;*$6=zYXl~(tT*ktjZd+NIGU<1~7Zy z{wmAk6>2U~7@$dgo{*I3z4KwE6e!LT`Jt>-$$=x2UkB``dQ&?1SWqH+KYo7!Yq-fl zg`d#F7b}pXifQ-faRb#%w~vI}zsIJJ3QnM^9h4#S{WI{45!>xPC?|M~{f+ntm9xp{ zU9-o@2v5L;AYE$S35d76F==4jxu)dqm=l`Y+bkI3sZANAy|TkNPABp8G6Mah+Xh#a z_v2R;gq^H93HXMT&u5W~8w0?%km&6G!MaOFK-3r)pg38 zyU4`{%?qkG5CKW5s-!jZYB*VPCx5sMTiA(6On-n}I`Q*)qEFbB$Ge^E42y2L#G&MnZT=+@1W$oGNz%qZPmBiYWPUuw|F!|K(>W*8v zOlS0t-4(HBipf)C$4`(?h10@PoKD!;0K6C563w-Kq^kQaM_abvoO>lo%~MKn1#j8f zeO9vpE*f~BNj|G$g?HI|C29xn4BSsKnPU)=NodEl~I zu)FMIyC%*qXil%O-xnYR;o*a{)jWdPbrzD>6Z<1iK0KK~i&RbMiYRbM zmdbTez!8lKu5m_ut?XZPinMf-VtWZQa#75|rTG>sA7p$w_BuL5_xV zhguGbWVlJ&b0@3#jL3e0C_v35T*1t7nDqp+BKrBNgF3klP9Fz~cBp;5Jp5%%yq92V zCuqI)v%tn3}ljgT0!%esOg$B?(87AUVVHMxj zt>%QL1+1G6;WZJbJ_dX54+YUK(1!WCGr(gjt@Yu1+Gb&RRj17n)xD_PluqwG|KE$l z6h~s=T1IF^1~eANvRxv};Z+@y@wkEU$HBO){&iLD?F@*=q~Q&L@S{um_YJM8_0&{n z<`Kzjox|u#pfJ3KBCxNeg)r*}5sh#)ufJZ{99=ZK{w0j@HAs9S(ilDiUAqgztYNA; zlJ7{Qyeuc^=0|dgQ48WSlL94C_Uu#TqEFKLv9u=|7*Jt0y|vsdmsYe^Bi#2(twI(%Si` zT!@)2XZLO|Za2pWzO`_v!RyVv#+7V!8|m5XfHrHD!bVT#DH}Y-&^WZ=Oh3&ozc7tF zjZOs?0S46GcOYXM#+y`3wDqx$3kE9ZtJ)SIAtAy$8#ct8t;OOAc#8aqN1WU`Ra;n{Fwk%k~s#I_<&MRu05H;SS&Vz2pqU|%7 zXM!JA51Q%-qynkxB}0b=brVjo-O_&$U$vmdFYIH@(f0g3M{Z8f!#wW&KMxoG+h&+@ zf9Fs7A6b?Hxkw!0XQ&1P7!NzPfop`r`YdaQUy54h*|L+53- zMBPWj&xZF^P*vc~^%C008;R^~2=4J`GMeHJ0it)j`CUVoKDTtl2Jc1MXfV->s6wAk zi48QKrG$iPG!A>+?O#P|o+!HA>=z^!C;9qo;zy0+Dh@Rva8q+_iNHK#vNy#wkiAn| z3>_uMK#px0SgcW>=$9KUEwjFo#owLyLE=i2$LNWz=XFX;k2*rLH^wk_0lzz(Wj~%V z!SY!lW_e;GftNZ7WGzb$<)(&--_K2tPR%XfI;pRIZdUg&XTGm^woaKsvwCz{3dKLv z z$-fuFg-#|+et9dh{Pwy!YJEz5&+XaEtPvXppbvWjpq*>9vw_ZSqH^kHE}{(OsiuW1 z^@$z5YeIorKkSZvVV9MD9A%Y~6xFyv6Kv>6o8qV?N9I$724!jd<1M=|EK0ELhMO6- z%*a;%-i;SIXHOVKH9hV)T9U{JC9-$G#>{5k1%fT_@e?2L?#SBuTkA4YX?ZNj$Im zu)4}j8q2KH)in{EWRf1L zKE=KN?qvCr!d=jx-G&l^ZY z7>wJG8P#rob6&fZ=PE_oOY3s>?S0WGRJshV;7bgJ<+^J#5tZv^dO6&E0%HwELX`@u z_pjW)vgNU$tCslqgjjY%(vc(j<`ktsl{T~(xf%Zxlt4Sv=cx&dP?#=_FYW8a7In@~ zjgRT@T%M4lRyuDE)i{f1665xzIn{-4n!O4se24TmsoeU$H$b8%OqJqH#t`}gJD+5C z^)WUpoysWE#^JZu3K`u&58b;P>4B(T2nXaYLV&D7QO92+igaL#+|>QhZ@F|FUufVo z#TPnwjWYs7&+n~4J^M0u3U{9QTy=HB!XAbwIlduI$HQFMc>qsFt`f3r?+j2vcXGMB z0HVNm1zl0)m3Ia7y*0p@A_=DEhM{*X`w;XG))hCR&~(-HQAX}XiY!M9VjRy9Yf(Dp zFE@{h9~N9!5mn1C%F;cv*`d6A>)t>^z(1|by!D$?gBn$OKgGB{rnA)_rE;Ou%ZoMF zP@@0iE#0F`_e?-^J;wJ!r2)Guc%nAj<92)YK^u$jvZvFJda*V>FL`%XQ2CsM`Kf@j zt#pr}03W5<2-rwBF2iw%$XUYNR;bhn{xQY*My!RglHb|gp+?k;BbgrsYcxTbV-7pjshV(xySGNdoQ20KUiG$b{t$|1n>>9(L{7La(u1(#yfSCN<#$ z!=xl=X!0moaN>?5q0Ez!e+SyjO}C@Pvsq00f)U)f@@^M!MNrH~6hQSQ5IkH`rpD}%F=&r%iQDt}`LK{UDi?lO|n5@{04xjOKZlY!_B z_awG{o`_D7y29`JgT_A05BqZw{ybc6sfN2qnUNJhXqXt$yzXJ!RV=g2VDKGO)43W}5cbC-d0gBygiQ9LhJg?6s*K@=o) z+GX$roVEAMqsc(c5KFE1;r(DH-}`rqLT?b9p50(!K^(W%j|k>^qcOthM4=1swKxQk 
z!|6rLD?C;-Iqj3(@Ol9mJXwB_NMm#@FYyQLmm1H-6+Z0A5b`0%fsFM@U&-tRPKZl{D=pSwDlsh$ zT2m&iUQDOgF+_FTtF7As2`quQ|=x(76{!3$;ci5#xgLmG`gxXQ=ESt7Ka8RmTzetz#NbuTVB&heW zzK|dOT`=u;=VF;EXpI(L+mU?vU2!J6%ns96hmqGK0R!;Tb7Xzer$)}8>Eqf0K*1!- zwYtyqwN7)6+JB6rvouT}XeR1Zny0aGn^)wBl-8Q3Idj%zr3=$`1A>VVXBhV6!rg|0 zLfP5w_2;((Z?+$~5NOx@QGO-+)s7Z$?US9W8ALOS+Yn&--|_!Eu^RZ zAmI*iyi^`@&*b=&6tJBGNfY<+336jQuWFjKU_(W{o+19o_lr5~sTf?OVtm^(KY*OT zuVgAnkkxedPMmOxExd{ZQ@NnHE0Hk;AINwc%X|H&^Je4IZZoNMgY+H-?Z@rc06rDh z*~zB>o#Zl#U28gO-OteWy-Y~)WF0=i(SZFLdy~aC1eGh6rhCYWpw&WkrBc78rlE)dLVy&vCB$RrLi?|hpD>+&&OV~&!|Sc zi4J^EJ;itj+>)D$y`^>L#8n0EXwcjK_(b5tq2fX>A2gHzE`9ac<(LqAJJ)iJ&!Zhs zsV&5ArM(%j-KUWNpfiQI=#MvOlC8aRg0kbgmc`s?~@`)3u ztnk%mi2BxYo*CF*y*j{a`K$MqD;i#hc!+lPEB-xgOy$#hTEu-B0bT&@RAWlMrPjclu+gDjmU*}%&bFXgU9YcULJNvpX z+`lkB|Md93FMw9*kNO#O#UF|-70%O`chp@%_4@`*bwih?xGkU3Mll@K$QTSA)hr}u zslIA=U}?luwz%MGxroH;r#6N6njUUTF)L9iHeCeRDU$!{g$KvO=FW@2I0)bD1+1UhNRL zDVe=(Sd-~TlC@Ma*@$>qm;8;e zp-W7QopbKrF$f*JEss_{T--5=e^gO{@V4`Fl9{Ee_2-MAU`3vG~DaDy7T4(QC^yx#_- zL5$R@2J?$VM}(x^o&Vl zILCyW0-(el-dBL;B>6qdO1X2Y=3My+OUdUs^pQ(;ipyu(W#3PHG4vbML8U;>cfo{rYwJP4S1g*9+F?dOna2H}=h4opEispXagP_unan zJXKwv{EWXn{iyxs?Qc21o$nUkRkz;!sKzCYKb`_Nc7=Y?igQc*QG){@;05-^BCT@%}J> zsJr)0oXCZ>UnW=FUYF){Ugt@H`EH@J4GfYtVe6Y}*ca4Te)v-*-}dE1$yzl_xzMfc z*3Oo9j!(Wb=UULJrIPM@_iWplWtKTl{x{=BU=>gkQT%&_>EAeu>%UijyXj|dRB!dm zYQrDt2esS}-}RKt=X*IJ)2eV@^yK_Jj(8(?edStjp?ax1YhIlVnX_cqbkWlyO_;}T zhF>y8-W(2Wq5yY?&jqa@;Hu}gTX_Mv#gY>^o^X9n{V$HySKWbg;1l$K+X~}80#__4 ztq;FGF92y{45Kq}5y}MH`+ti~?SP6De&_#svj0Q~)-ra!W2p{MYa6W1LJeAU_WJtc zT|n1{{3~6Gxc1pMPc4%gR@JP=v!iYOwEeh_;|pQbg)O22(jthB+$-)WdDNoL;t`34EO&td@MoZmwr7f zdj4ya{i)2i>34ogm%p6|Jgri{rT#lmJ@9Dm#pnX-*JUhzwWWTF?V8{7i7;EFHmv_s z`_}ozPjc^XbN|b*PY~#g-@t4A^m@_w`*tqsUv=?cz_-osYI?zw84JV@*#ni<1J5JB zh%Rvbx=A3&)pFNr&rjT5cT@fg(+}VrdHwtU3_o_6qw#+|>#|$z{?qx^`JJE4-rrXK z*AUO~pW*#qkPpQ}@_>id^~G-y+qy8SChOWB^@Ts!J@>Z<%|E=4zprd^_$;-%LjF2y z%8p1dg;qUY=8*bZu4~1^m9C#PUa1H9Ms5^!TEURO#~`vLFjftDOCl?8`9Fv6^>1AA9Mv;9Ca$8>I zOkA^ZlT|x#i+17!$2S?wPv-y7n74b`kLllhV*Nd?=$6KsJw763?Od5`be5+f-i-)@((&z0a*G%Wsm~}?aqs?h|toxl4dCl5ip1-;M z+v-QN`$yONN6&Xp|M>d8*w^YprlXu^=eNH(uXO5aaby3Z`bU;ep3ZErn~2n$BK2M^ zCZy&sN$jDx=i20FKtwdtEB+~a|F-(ua29jBD!p@)P`lBKE;0j8RjScXQ-9`xPX`6(th9mxpGDIwe?oO1**uBi~k9pwfH zC;xu^E2%vB_v@e6_DC}QKio~;|NJKZ{UY$d9EJ;@C;xu^^CH-pYk&m?lAJw~E0{+e zhzQ%!5Tr#KyjHeX$#l}gDgQoJ|Goqqkwau7U_#}%Hy3zm@8Qq(j~#l6%mIzFAejW5 z4@!ZpMviA-GyeUuf9(E8c~<$K-~7K{0H>xHGQgSS1vn>Mt4EfSN6N5_MCN6-c5p84 z2ZvEQhDQ*NaF7~xAlA}wGz96F28HgPn4h!R`ucnG`mF8nTEOiKsD_vctRRppfzjG* zw7P}1N=DlQ1X@(rVtS;HOnV~We)IP7Yrs8T$nCV1YfGxVC2LM3a{M?}lp$CK9Q!pU rdkm7o6Gw>YK8*l1j6c=_hhTuiFq4-7cb|7o+y5)J7I>Zx`~RB&kn;59 literal 0 HcmV?d00001 diff --git a/vllm/distributed/kv_transfer/kv_connector/__init__.py b/vllm/distributed/kv_transfer/kv_connector/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/vllm/distributed/kv_transfer/kv_connector/base.py b/vllm/distributed/kv_transfer/kv_connector/base.py new file mode 100644 index 0000000000000..6089e3babac3e --- /dev/null +++ b/vllm/distributed/kv_transfer/kv_connector/base.py @@ -0,0 +1,122 @@ +""" +KVConnectorBase Class for Distributed KV Cache & Hidden State communication + +The class provides two primary abstract methods: +1. send_kv_caches_and_hidden_states(): Send KV caches and hidden states +2. 
+"""
+
+from abc import ABC, abstractmethod
+from typing import TYPE_CHECKING, List, Tuple, Union
+
+import torch
+
+from vllm.sequence import IntermediateTensors
+
+if TYPE_CHECKING:
+    from vllm.config import VllmConfig
+    from vllm.worker.model_runner import ModelInputForGPUWithSamplingMetadata
+
+
+class KVConnectorBase(ABC):
+    """
+    Abstract base class for a KV connector.
+
+    The class provides two primary abstract methods:
+    1. send_kv_caches_and_hidden_states(): Send KV caches and hidden states
+    2. recv_kv_caches_and_hidden_states(): Recv KV caches and hidden states
+    """
+
+    @abstractmethod
+    def __init__(
+        self,
+        rank: int,
+        local_rank: int,
+        config: "VllmConfig",
+    ):
+        raise NotImplementedError
+
+    @abstractmethod
+    def close(self) -> None:
+        """Close the buffer and release resources.
+
+        This method is responsible for cleaning up resources related to the
+        connector when it is no longer needed.
+
+        Raises:
+            NotImplementedError: This method must be implemented in subclasses.
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def send_kv_caches_and_hidden_states(
+        self,
+        model_executable: torch.nn.Module,
+        model_input: "ModelInputForGPUWithSamplingMetadata",
+        kv_caches: List[torch.Tensor],
+        hidden_or_intermediate_states: Union[torch.Tensor,
+                                             IntermediateTensors],
+    ) -> None:
+        """
+        Send KV caches and hidden states to the connector.
+
+        This method processes the input tokens, KV caches, and
+        hidden/intermediate states for a given model and sends the data to the
+        decode instance.
+
+        Args:
+            model_executable (torch.nn.Module): The model executable containing
+                start and end layer information.
+            model_input (ModelInputForGPUWithSamplingMetadata): The input
+                metadata from vLLM.
+            kv_caches (List[torch.Tensor]): List of KV caches (keys and values)
+                for each layer.
+            hidden_or_intermediate_states (Union[torch.Tensor,
+            IntermediateTensors]):
+                The hidden or intermediate states associated with the tokens.
+
+        Returns:
+            None
+
+        """
+
+        raise NotImplementedError
+
+    @abstractmethod
+    def recv_kv_caches_and_hidden_states(
+        self, model_executable: torch.nn.Module,
+        model_input: "ModelInputForGPUWithSamplingMetadata",
+        kv_caches: List[torch.Tensor]
+    ) -> Tuple[Union[torch.Tensor, IntermediateTensors], bool,
+               "ModelInputForGPUWithSamplingMetadata"]:
+        """
+        Receive KV caches and hidden states from the connector.
+
+        This method attempts to retrieve KV caches and hidden states for the
+        input tokens. If all required KV caches and hidden states are
+        received, model execution is bypassed; otherwise it falls back to
+        normal vLLM model forwarding.
+
+        Args:
+            model_executable (torch.nn.Module):
+                The model executable from the vLLM model runner.
+            model_input (ModelInputForGPUWithSamplingMetadata):
+                The model input from the vLLM model runner.
+            kv_caches (List[torch.Tensor]):
+                List of KV caches for each layer.
+
+        Returns:
+            - hidden_or_intermediate_states (torch.Tensor or
+              IntermediateTensors):
+                Concatenated hidden states if all required data is retrieved,
+                otherwise `None`.
+            - bypass_model_exec (bool):
+                Indicates whether the model execution can be skipped (True) or
+                needs to be redone (False).
+            - model_input (ModelInputForGPUWithSamplingMetadata):
+                Optionally adjusted input metadata for re-execution when
+                `bypass_model_exec=False`.
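+
+        Example (an illustrative sketch only; the driver code and the
+        `connector` variable below are assumptions, not part of this
+        interface):
+        ```
+        hidden, bypass_model_exec, model_input = \
+            connector.recv_kv_caches_and_hidden_states(
+                model_executable, model_input, kv_caches)
+        if not bypass_model_exec:
+            ...  # fall back to a normal forward pass on model_input
+        ```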
+
+        """
+
+        raise NotImplementedError
diff --git a/vllm/distributed/kv_transfer/kv_connector/factory.py b/vllm/distributed/kv_transfer/kv_connector/factory.py
new file mode 100644
index 0000000000000..3e2bb436d24b5
--- /dev/null
+++ b/vllm/distributed/kv_transfer/kv_connector/factory.py
@@ -0,0 +1,20 @@
+from typing import TYPE_CHECKING
+
+from .base import KVConnectorBase
+
+if TYPE_CHECKING:
+    from vllm.config import VllmConfig
+
+
+class KVConnectorFactory:
+
+    @staticmethod
+    def create_connector(rank: int, local_rank: int,
+                         config: "VllmConfig") -> KVConnectorBase:
+        supported_kv_connectors = ["PyNcclConnector", "MooncakeConnector"]
+        if config.kv_transfer_config.kv_connector in supported_kv_connectors:
+            from .simple_connector import SimpleConnector
+            return SimpleConnector(rank, local_rank, config)
+        else:
+            raise ValueError(f"Unsupported connector type: "
+                             f"{config.kv_transfer_config.kv_connector}")
diff --git a/vllm/distributed/kv_transfer/kv_connector/simple_connector.py b/vllm/distributed/kv_transfer/kv_connector/simple_connector.py
new file mode 100644
index 0000000000000..4ace03ff1184e
--- /dev/null
+++ b/vllm/distributed/kv_transfer/kv_connector/simple_connector.py
@@ -0,0 +1,312 @@
+"""
+Simple KV Cache Connector for Distributed Machine Learning Inference
+
+The SimpleConnector transfers KV caches between the prefill vLLM worker (KV
+cache producer) and the decode vLLM worker (KV cache consumer) using
+PyNcclPipe or MooncakePipe.
+
+The logic can be extended to support other pipes and lookup buffers.
+"""
+from typing import TYPE_CHECKING, List, Optional, Tuple, Union
+
+import torch
+
+from vllm import _custom_ops as ops
+from vllm.config import VllmConfig
+from vllm.distributed.kv_transfer.kv_connector.base import KVConnectorBase
+from vllm.distributed.kv_transfer.kv_lookup_buffer.simple_buffer import (
+    SimpleBuffer)
+from vllm.logger import init_logger
+from vllm.sequence import IntermediateTensors
+
+if TYPE_CHECKING:
+    from vllm.worker.model_runner import ModelInputForGPUWithSamplingMetadata
+
+logger = init_logger(__name__)
+
+
+class SimpleConnector(KVConnectorBase):
+
+    def __init__(
+        self,
+        rank: int,
+        local_rank: int,
+        config: VllmConfig,
+    ):
+
+        self.config = config.kv_transfer_config
+
+        if self.config.kv_connector == "PyNcclConnector":
+            from vllm.distributed.kv_transfer.kv_pipe.pynccl_pipe import (
+                PyNcclPipe)
+            logger.info(
+                "Initializing PyNcclConfig under kv_transfer_config %s",
+                self.config)
+        elif self.config.kv_connector == "MooncakeConnector":
+            # Check if MOONCAKE_CONFIG_PATH is set
+            import os
+            use_mooncake_distributed_pipe = os.getenv(
+                'MOONCAKE_CONFIG_PATH') is not None
+
+            if not use_mooncake_distributed_pipe:
+                raise ValueError(
+                    "To use MooncakeConnector, you need to pass the ENV: "
+                    "'MOONCAKE_CONFIG_PATH=/path/to/mooncake_config.json'.")
+            else:
+                from vllm.distributed.kv_transfer.kv_pipe.mooncake_pipe import (  # noqa: E501
+                    MooncakePipe)
+                logger.info(
+                    "Initializing MooncakeConfig under kv_transfer_config %s",
+                    self.config)
+
+        self.lookup_buffer_size = self.config.kv_buffer_size
+
+        self.producer_buffer: Optional[SimpleBuffer] = None
+        self.consumer_buffer: Optional[SimpleBuffer] = None
+
+        self.producer_data_pipe: Union[PyNcclPipe, MooncakePipe]
+        self.consumer_data_pipe: Union[PyNcclPipe, MooncakePipe]
+        self.producer_signal_pipe: Union[PyNcclPipe, MooncakePipe]
+        self.consumer_signal_pipe: Union[PyNcclPipe, MooncakePipe]
+
+        # 2 pipes for every rank in the world
+        port_offset_base = 2 * rank
+
+        # In disaggregated prefill, the prefill vLLM instance only uses the
+        # send pipe and the decode vLLM instance only uses the recv pipe.
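+        # Port pairing sketch (an illustration of the scheme above, not an
+        # API guarantee): rank 0 uses port offsets 0 (data) and 1 (signal),
+        # rank 1 uses offsets 2 and 3, and so on, so the producer and
+        # consumer sides of the same rank meet on matching ports.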
+        if self.config.is_kv_producer:
+
+            if self.config.kv_connector == "PyNcclConnector":
+                self.producer_data_pipe = PyNcclPipe(
+                    local_rank=local_rank,
+                    config=self.config,
+                    port_offset=port_offset_base,
+                )
+                self.producer_signal_pipe = PyNcclPipe(
+                    local_rank=local_rank,
+                    config=self.config,
+                    port_offset=port_offset_base + 1,
+                    device="cpu",
+                )
+            elif self.config.kv_connector == "MooncakeConnector":
+                self.producer_data_pipe = MooncakePipe(
+                    local_rank=local_rank,
+                    config=self.config,
+                )
+                # We only need to initialize MooncakePipe once
+                self.producer_signal_pipe = self.producer_data_pipe
+
+            self.producer_buffer = SimpleBuffer(self.producer_signal_pipe,
+                                                self.producer_data_pipe,
+                                                self.config.kv_buffer_size)
+
+        else:
+
+            # The current vLLM instance is the KV consumer, so it needs to
+            # connect its recv pipe to the send pipe of the KV producer.
+            if self.config.kv_connector == "PyNcclConnector":
+                self.consumer_data_pipe = PyNcclPipe(
+                    local_rank=local_rank,
+                    config=self.config,
+                    port_offset=port_offset_base,
+                )
+                self.consumer_signal_pipe = PyNcclPipe(
+                    local_rank=local_rank,
+                    config=self.config,
+                    port_offset=port_offset_base + 1,
+                    device="cpu",
+                )
+            elif self.config.kv_connector == "MooncakeConnector":
+                self.consumer_data_pipe = MooncakePipe(
+                    local_rank=local_rank,
+                    config=self.config,
+                )
+                self.consumer_signal_pipe = self.consumer_data_pipe
+
+            self.consumer_buffer = SimpleBuffer(
+                self.consumer_signal_pipe,
+                self.consumer_data_pipe,
+                self.config.kv_buffer_size,
+            )
+
+    def select(self, input_tokens: Optional[torch.Tensor],
+               roi: Optional[torch.Tensor]) -> List[Optional[torch.Tensor]]:
+
+        assert self.consumer_buffer is not None, "Please initialize the "\
+            "consumer buffer before calling select."
+        return self.consumer_buffer.drop_select(input_tokens, roi)
+
+    def insert(self, input_tokens: torch.Tensor, roi: torch.Tensor,
+               key: torch.Tensor, value: torch.Tensor,
+               hidden: torch.Tensor) -> None:
+
+        assert self.producer_buffer is not None, "Please initialize the "\
+            "producer buffer before calling insert."
+
+        self.producer_buffer.insert(input_tokens, roi, key, value, hidden)
+
+    def send_kv_caches_and_hidden_states(
+        self,
+        model_executable: torch.nn.Module,
+        model_input: "ModelInputForGPUWithSamplingMetadata",
+        kv_caches: List[torch.Tensor],
+        hidden_or_intermediate_states: Union[torch.Tensor,
+                                             IntermediateTensors],
+    ) -> None:
+
+        input_tokens_tensor = model_input.input_tokens
+        seq_lens = model_input.attn_metadata.seq_lens
+        slot_mapping_flat = model_input.attn_metadata.slot_mapping.flatten()
+        start_layer = model_executable.model.start_layer
+        end_layer = model_executable.model.end_layer
+
+        model_config = model_executable.model.config
+        num_heads = model_config.num_key_value_heads
+        hidden_size = model_config.hidden_size
+        num_attention_heads = model_config.num_attention_heads
+        head_size = int(hidden_size / num_attention_heads)
+
+        # seq_lens delimits the KV caches newly added to vLLM, so we send
+        # each request's KV cache to the decode instance.
+        # FIXME(Kuntai): This assumes that all requests are prefill.
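+        # Shape sketch (an assumption for illustration, following vLLM's
+        # flash-attention cache layout): kv_cache[0] and kv_cache[1] are the
+        # key and value caches; reshaping them to [-1, num_heads, head_size]
+        # yields per-slot tables that slot_mapping_flat can index directly.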
+ for idx, slen in enumerate(seq_lens): + start_pos = sum(seq_lens[:idx]) + end_pos = start_pos + slen + current_tokens = input_tokens_tensor[start_pos:end_pos] + + keys, values = [], [] + + for layer_id in range(start_layer, end_layer): + kv_cache = kv_caches[layer_id - start_layer] + + key_cache = kv_cache[0].reshape(-1, num_heads, head_size) + value_cache = kv_cache[1].reshape(-1, num_heads, head_size) + + current_slot_mapping = slot_mapping_flat[start_pos:end_pos] + + keys.append(key_cache[current_slot_mapping].unsqueeze(0)) + values.append(value_cache[current_slot_mapping].unsqueeze(0)) + + keys = torch.cat(keys, dim=0) + values = torch.cat(values, dim=0) + + self.insert(current_tokens, + torch.ones_like(current_tokens, + dtype=bool), keys, values, + hidden_or_intermediate_states[start_pos:end_pos]) + + logger.debug("[rank%d]: KV send DONE.", torch.distributed.get_rank()) + + def recv_kv_caches_and_hidden_states( + self, model_executable: torch.nn.Module, + model_input: "ModelInputForGPUWithSamplingMetadata", + kv_caches: List[torch.Tensor] + ) -> Tuple[Union[torch.Tensor, IntermediateTensors], bool, + "ModelInputForGPUWithSamplingMetadata"]: + + # When bypass_model_exec is set to False, it means that at least for one + # request its corresponding KV cache or hidden state is missing. + # In this case we need to do prefilling to recompute missing KV cache + # and hidden states. + bypass_model_exec = True + + input_tokens_tensor = model_input.input_tokens + seq_lens = model_input.attn_metadata.seq_lens + slot_mapping = model_input.attn_metadata.slot_mapping.flatten() + + hidden_or_intermediate_states_for_one_req = [] + + input_tokens_list = [] + num_computed_tokens_list = [] + start_pos_list = [] + + # enumerate different requests + # FIXME(Kuntai): This impl assumes that all requests are prefill. + for idx, slen in enumerate(seq_lens): + + start_pos = sum(seq_lens[:idx]) + end_pos = start_pos + slen + current_tokens = input_tokens_tensor[start_pos:end_pos] + num_tokens = slen + + # collecting data for rebuilding the input + input_tokens_list.append(current_tokens) + start_pos_list.append(start_pos) + + ret = self.select(current_tokens, + torch.ones_like(current_tokens, dtype=bool)) + if ret[0] is None: + # didn't find any match. + bypass_model_exec = False + num_computed_tokens_list.append(0) + continue + + roi: torch.Tensor = ret[1] + keys: torch.Tensor = ret[2] + values: torch.Tensor = ret[3] + hidden: torch.Tensor = ret[4] + + num_computed_tokens = roi.shape[0] + num_computed_tokens_list.append(num_computed_tokens) + + # check if both KV cache and the hidden states are received + # If not, need to redo the forwarding to compute missing states + if not all([(num_computed_tokens == num_tokens), hidden is not None + ]): + bypass_model_exec = False + + # update the end position based on how many tokens are cached. 
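+            # (If only a prefix of the request was matched, the cache writes
+            #  below cover just that prefix; the missing suffix is recomputed
+            #  by the fallback forward pass.)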
+ end_pos = start_pos + num_computed_tokens + + # put received KV caches into paged memory + for i in range(model_executable.model.start_layer, + model_executable.model.end_layer): + + kv_cache = kv_caches[i - model_executable.model.start_layer] + layer = model_executable.model.layers[i] + + key_cache, value_cache = kv_cache[0], kv_cache[1] + ops.reshape_and_cache_flash( + keys[i - model_executable.model.start_layer].to( + key_cache.device), + values[i - model_executable.model.start_layer].to( + value_cache.device), + key_cache, + value_cache, + slot_mapping[start_pos:end_pos], + layer.self_attn.attn.kv_cache_dtype, + layer.self_attn.attn._k_scale, + layer.self_attn.attn._v_scale, + ) + + hidden_or_intermediate_states_for_one_req.append(hidden) + + if not bypass_model_exec: + # Some of the KV cache is not retrieved + # Here we will fall back to normal model forwarding + # But optionally you can adjust model_input so that you only do + # prefilling on those tokens that are missing KV caches. + logger.debug( + "[rank%d]: Failed to receive all KVs and hidden " + "states, redo model forwarding.", torch.distributed.get_rank()) + hidden_or_intermediate_states = None + + else: + logger.debug( + "[rank%d]: Successfully received all KVs and hidden " + "states, skip model forwarding.", torch.distributed.get_rank()) + hidden_or_intermediate_states = torch.cat( + hidden_or_intermediate_states_for_one_req, dim=0) + + return hidden_or_intermediate_states, bypass_model_exec, model_input + + def close(self): + self.producer_data_pipe.close() + self.consumer_data_pipe.close() + if self.config.kv_connector == "PyNcclConnector": + self.producer_signal_pipe.close() + self.consumer_signal_pipe.close() + elif self.config.kv_connector == "MooncakeConnector": + # MooncakePipe reuses data_pipe for signal_pipe, so we only have to + # close the data_pipe. + pass diff --git a/vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py b/vllm/distributed/kv_transfer/kv_lookup_buffer/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/vllm/distributed/kv_transfer/kv_lookup_buffer/base.py b/vllm/distributed/kv_transfer/kv_lookup_buffer/base.py new file mode 100644 index 0000000000000..bad119a1aa929 --- /dev/null +++ b/vllm/distributed/kv_transfer/kv_lookup_buffer/base.py @@ -0,0 +1,108 @@ +""" +This file contains a new class `KVLookupBufferBase` that allows developers to +think of KV cache operations as inserting new KV cache entries (`insert`) +into the lookup buffer and querying existing KV caches (`drop_select`) +from the lookup buffer. + +All distributed communications are abstracted behind this class. +""" + +from abc import ABC, abstractmethod +from typing import List, Optional + +import torch + + +class KVLookupBufferBase(ABC): + """ + Abstract base class for a lookup buffer. + + This class provides an abstraction for a key-value (KV) cache lookup buffer. + + The key of the lookup buffer: + - input_tokens: token IDs of the request + - roi: a binary mask on top of input_tokens. + - Purpose of roi: Since KV cache may only be available for a subset of + tokens in the input (for example, when vLLM is connected to an external + KV cache service), roi specifies the subset of tokens that the KV cache + is associated with. + - NOTE: roi can be further extended to describe which part of KV the + current process is holding (each process may only hold a part of KV + due to TP and PP). This is not implemented for now. 
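+      - Example (illustrative): for input_tokens [9, 8, 7] and
+        roi = [True, True, False], the entry describes the KV cache of the
+        first two tokens only.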
+
+    The value of the lookup buffer:
+    - key: the key tensor in the KV cache
+    - value: the value tensor in the KV cache
+    - hidden: the final hidden state generated by model forwarding. This
+      allows vLLM to bypass further model forwarding by transmitting the
+      hidden state.
+    """
+
+    @abstractmethod
+    def insert(self, input_tokens: torch.Tensor, roi: torch.Tensor,
+               key: torch.Tensor, value: torch.Tensor,
+               hidden: torch.Tensor) -> None:
+        """Insert into the lookup buffer.
+
+        The functionality is similar to the following python statement
+        ```
+        buffer[input_tokens, roi] = [key, value, hidden]
+        ```
+
+        FIXME: in the future, we should only have two arguments, key and
+        value, where key is a tensor dict and value is a tensor dict.
+
+        FIXME: we should transmit both sampler outputs and the hidden states.
+
+        Args:
+            input_tokens (torch.Tensor): token IDs.
+            roi (torch.Tensor): A binary mask on top of the input tokens
+            key (torch.Tensor): The key tensor in the KV cache.
+            value (torch.Tensor): The value tensor in the KV cache.
+            hidden (torch.Tensor): The final hidden state tensor generated
+                                   during model forwarding to bypass model
+                                   forwarding.
+
+        Raises:
+            NotImplementedError: This method must be implemented in subclasses.
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def drop_select(
+            self, input_tokens: Optional[torch.Tensor],
+            roi: Optional[torch.Tensor]) -> List[Optional[torch.Tensor]]:
+        """Select and *drop* KV cache entries from the lookup buffer.
+
+        The functionality is similar to the following python statements
+        ```
+        ret = buffer.pop(input_tokens, roi)
+        return ret
+        ```
+
+        If `input_tokens` and `roi` are both `None`, any entry in the buffer
+        may be selected, returned, and removed from the buffer. This is
+        useful when offloading the KV cache to a KV cache storage service.
+
+        Args:
+            input_tokens (torch.Tensor): token IDs.
+            roi (torch.Tensor): A binary mask on top of the input tokens
+
+        Returns:
+            List[Optional[torch.Tensor]]: A list of tensors. Can be None.
+
+        Raises:
+            NotImplementedError: This method must be implemented in subclasses.
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def close(self) -> None:
+        """Close the buffer and release resources.
+
+        This method is responsible for cleaning up resources related to the
+        lookup buffer when it is no longer needed.
+
+        Raises:
+            NotImplementedError: This method must be implemented in subclasses.
+        """
+        raise NotImplementedError
diff --git a/vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py b/vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py
new file mode 100644
index 0000000000000..fe8d8d7375f36
--- /dev/null
+++ b/vllm/distributed/kv_transfer/kv_lookup_buffer/simple_buffer.py
@@ -0,0 +1,242 @@
+"""
+    Implements a distributed key-value (KV) cache transfer mechanism.
+
+    Key Features:
+    - Distributed KV cache transmission using PyNccl pipes.
+    - Non-blocking `insert`, blocking `drop_select`.
+    - Uses a CPU signal pipe to avoid race conditions.
+    - Handles buffer size constraints and provides a backpressure mechanism
+      to stop the prefill instance when the decode instance is slow.
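+
+    Usage sketch (illustrative only; `signal_pipe` and `data_pipe` are
+    assumed to be already-constructed KVPipeBase instances):
+    ```
+    buffer = SimpleBuffer(signal_pipe, data_pipe, buffer_size_thresh=1e9)
+    buffer.insert(tokens, roi, key, value, hidden)  # producer side
+    out = buffer.drop_select(tokens, roi)  # consumer side
+    ```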
+""" +import threading +import time +from collections import deque +from typing import Deque, List, Optional, Union + +import torch + +from vllm.distributed.kv_transfer.kv_lookup_buffer.base import ( + KVLookupBufferBase) +from vllm.distributed.kv_transfer.kv_pipe.base import KVPipeBase +from vllm.logger import init_logger + +logger = init_logger(__name__) + + +class SimpleBuffer(KVLookupBufferBase): + + def __init__(self, signal_pipe: KVPipeBase, data_pipe: KVPipeBase, + buffer_size_thresh: float): + """ + signal_pipe: on CPU + + NOTE: on-device recv will block all threads in the process, making the + KV cache producer unable to listen to new request while transmitting + KV cache. Luckily CPU recv only blocks the current thread so we use + CPU recv to listen to new request. + + data_pipe: on device (e.g. GPU) + """ + + self.buffer: Deque[List[torch.Tensor]] = deque() + + self.buffer_size = 0 + self.buffer_size_threshold = buffer_size_thresh + self.buffer_lock = threading.Lock() + self.signal_pipe = signal_pipe + self.data_pipe = data_pipe + self.request_handling_thread: Optional[threading.Thread] = None + + self.normal_signal = torch.tensor([0], device="cpu") + self.end_signal = None + + def _matches(self, tokens_roi_sender: List[torch.Tensor], + tokens_roi_recver: List[torch.Tensor]): + + # tokens_roi_sender: tokens and roi of the producer (in the buffer) + # tokens_roi_recver: tokens and roi of the consumer (query) + + tokens_sender = tokens_roi_sender[0] + tokens_recver = tokens_roi_recver[0] + roi_sender = tokens_roi_sender[1] + roi_recver = tokens_roi_recver[1] + + if tokens_recver is None: + # consumer sends an empty request + # semantics: DROP SELECT * LIMIT 1 + # so any of the data in the buffer can be drop-selected + return True + + # Assuming that roi is a binary mask on tokens + tokens_sender = tokens_sender[roi_sender] + tokens_recver = tokens_recver[roi_recver] + + # simple common prefix matching + min_length = min(len(tokens_sender), len(tokens_recver)) + if torch.allclose(tokens_sender[:min_length], + tokens_recver[:min_length]): + return min_length + + return 0 + + def _send_tensor_and_dec_size(self, + tensor: Optional[torch.Tensor]) -> None: + + assert tensor is not None, "Use self.data_pipe.send(None) instead" + self.buffer_size -= tensor.element_size() * tensor.numel() + if tensor.dtype == torch.bool: + tensor = tensor.float() + self.data_pipe.send_tensor(tensor) + + def _get_element_size(self, data: Optional[Union[List, torch.Tensor]]): + + if isinstance(data, torch.Tensor): + return data.element_size() * data.numel() + if not data: + # cannot perform `not data` on a tensor + # so this check needs to go after the check above + return 0 + + raise AssertionError(f"Unknown data type {type(data)}") + + def _add_to_buffer(self, input_tokens: torch.Tensor, roi: torch.Tensor, + key: torch.Tensor, value: torch.Tensor, + hidden: torch.Tensor): + + if isinstance(input_tokens, torch.Tensor): + input_tokens = input_tokens.clone() + if isinstance(roi, torch.Tensor): + roi = roi.clone() + if isinstance(key, torch.Tensor): + key = key.clone() + if isinstance(value, torch.Tensor): + value = value.clone() + if isinstance(hidden, torch.Tensor): + hidden = hidden.clone() + + buffer_item = [input_tokens, roi, key, value, hidden] + + with self.buffer_lock: + for data in buffer_item: + self.buffer_size += self._get_element_size(data) + self.buffer.append(buffer_item) + + def _is_end_signal(self, signal): + return signal is None + + def drop_select_handler(self): + + try: + + while True: + 
+                signal = self.signal_pipe.recv_tensor()
+                if self._is_end_signal(signal):
+                    logger.info("Received end signal!")
+                    break
+
+                input_tokens = self.data_pipe.recv_tensor()
+
+                roi = self.data_pipe.recv_tensor()
+                assert roi is not None, "Please provide the roi when sending "\
+                    "a drop-select request"
+                roi = (roi > 0.5)
+                tokens_roi_recver = [input_tokens, roi]
+
+                matched_length = 0
+
+                # perform input tokens and roi matching
+                # FIXME: this matching is O(n); ideally it should be O(1),
+                # but the buffer size won't (and shouldn't) be too large, so
+                # the fix is not urgent.
+                with self.buffer_lock:
+
+                    for _ in range(len(self.buffer)):
+
+                        temp_length = self._matches(self.buffer[0],
+                                                    tokens_roi_recver)
+                        if temp_length > 0:
+                            matched_length = temp_length
+                            break
+                        # rotate the element we just accessed to the end
+                        self.buffer.rotate(-1)
+
+                    if matched_length > 0:
+                        # need to clone the tensor
+                        # in case the tensor is freed before sending finishes
+                        matched_item = self.buffer.popleft()
+                        for tensor in matched_item:
+                            self._send_tensor_and_dec_size(tensor)
+
+                    else:
+                        # no match, just send None
+                        for _ in range(5):
+                            self.data_pipe.send_tensor(None)
+
+        except RuntimeError as e:
+            if 'Connection closed by peer' not in str(e):
+                raise e
+
+        logger.debug("Closing drop_select_handler")
+
+    def drop_select(
+            self, input_tokens: Optional[torch.Tensor],
+            roi: Optional[torch.Tensor]) -> List[Optional[torch.Tensor]]:
+
+        assert self.request_handling_thread is None, \
+            "drop_select should be called by the KV cache consumer "\
+            "(e.g. the decode vLLM instance)"
+
+        if isinstance(input_tokens, torch.Tensor):
+            input_tokens = input_tokens.clone()
+        if isinstance(roi, torch.Tensor):
+            roi = roi.clone().float()
+
+        self.signal_pipe.send_tensor(self.normal_signal)
+        self.data_pipe.send_tensor(input_tokens)
+        self.data_pipe.send_tensor(roi)
+
+        input_tokens = self.data_pipe.recv_tensor()
+        roi = self.data_pipe.recv_tensor()
+        if roi is not None:
+            # convert from float tensor to bool tensor,
+            # as PyNccl does not support sending bool tensors
+            roi = (roi > 0.5)
+        key = self.data_pipe.recv_tensor()
+        value = self.data_pipe.recv_tensor()
+        hidden = self.data_pipe.recv_tensor()
+
+        return [input_tokens, roi, key, value, hidden]
+
+    def full_handler(self):
+        time.sleep(0.001)
+
+    def insert(self, input_tokens: torch.Tensor, roi: torch.Tensor,
+               key: torch.Tensor, value: torch.Tensor,
+               hidden: torch.Tensor) -> None:
+
+        if self.buffer_size > self.buffer_size_threshold:
+            # log outside the while loop to avoid this message being logged
+            # repeatedly.
+            logger.debug("KV transfer buffer is full. Handling...")
+            while self.buffer_size > self.buffer_size_threshold:
+                self.full_handler()
+
+        self._add_to_buffer(input_tokens, roi, key, value, hidden)
+
+        # When insert() is called, the current process is a sender;
+        # launch the request handler and start listening for requests.
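+        # (Editorial note: the handler thread is created lazily on the first
+        # insert() call and then serves all subsequent drop-select requests;
+        # it exits once it receives the end signal.)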
+        if self.request_handling_thread is None:
+            self.request_handling_thread = threading.Thread(
+                target=self.drop_select_handler)
+            self.request_handling_thread.start()
+
+    def close(self):
+
+        if hasattr(self, "request_handling_thread"
+                   ) and self.request_handling_thread is not None:
+            self.request_handling_thread.join()
+
+        else:
+            # TODO: have an explicit close signal and an explicit way to
+            # check whether this process is the requester
+            self.signal_pipe.send_tensor(self.end_signal)
diff --git a/vllm/distributed/kv_transfer/kv_pipe/__init__.py b/vllm/distributed/kv_transfer/kv_pipe/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/vllm/distributed/kv_transfer/kv_pipe/base.py b/vllm/distributed/kv_transfer/kv_pipe/base.py
new file mode 100644
index 0000000000000..4b0cb44cc5b81
--- /dev/null
+++ b/vllm/distributed/kv_transfer/kv_pipe/base.py
@@ -0,0 +1,65 @@
+"""
+This file defines the interface `KVPipeBase`,
+which provides an abstraction for sending and receiving tensors, or None, via
+distributed communications.
+
+All classes implementing this interface are assumed to behave as FIFO pipes.
+
+If your distributed communication platform already supports key-value lookup,
+you can bypass this interface and directly start from `kv_lookup_buffer`.
+"""
+
+from abc import ABC, abstractmethod
+from typing import Optional
+
+import torch
+
+
+class KVPipeBase(ABC):
+    """
+    This class provides an interface for sending and receiving tensors, or
+    None, via distributed communications.
+    """
+
+    @abstractmethod
+    def send_tensor(self, tensor: Optional[torch.Tensor]) -> None:
+        """Send a tensor, or None, via the pipe.
+
+        Sending None must be supported -- it is important for error handling.
+
+        TODO: add a `key` argument so that we can use a traditional
+        key-value database as the distributed communication mechanism behind
+        the pipe.
+
+        Args:
+            tensor (Optional[torch.Tensor]): The tensor to be sent. Can be None.
+
+        Raises:
+            NotImplementedError: This method must be implemented in subclasses.
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def recv_tensor(self) -> Optional[torch.Tensor]:
+        """Receive a tensor (can be None) from the pipe.
+
+        Returns:
+            Optional[torch.Tensor]: The tensor received from the pipe. Can
+                be None.
+
+        Raises:
+            NotImplementedError: This method must be implemented in subclasses.
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def close(self) -> None:
+        """Close the pipe and release resources.
+
+        This method is responsible for closing the communication pipe
+        and releasing any resources associated with it.
+
+        Raises:
+            NotImplementedError: This method must be implemented in subclasses.
+ """ + raise NotImplementedError diff --git a/vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py b/vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py new file mode 100644 index 0000000000000..8e4358672b74d --- /dev/null +++ b/vllm/distributed/kv_transfer/kv_pipe/mooncake_pipe.py @@ -0,0 +1,272 @@ +import json +import os +import pickle +from concurrent.futures import ThreadPoolExecutor +from dataclasses import dataclass +from typing import Optional, Union + +import torch +import zmq + +from vllm.config import KVTransferConfig +from vllm.distributed.kv_transfer.kv_pipe.base import KVPipeBase +from vllm.logger import init_logger + +logger = init_logger(__name__) +NONE_INT = -150886311 + + +@dataclass +class MooncakeTransferEngineConfig: + prefill_url: str + decode_url: str + metadata_backend: Union[str, None] + metadata_server: str + protocol: str + device_name: str + + @staticmethod + def from_file(file_path: str) -> 'MooncakeTransferEngineConfig': + """Load the config from a JSON file.""" + with open(file_path) as fin: + config = json.load(fin) + return MooncakeTransferEngineConfig( + prefill_url=config.get("prefill_url"), + decode_url=config.get("decode_url"), + metadata_backend=config.get("metadata_backend", None), + metadata_server=config.get("metadata_server"), + protocol=config.get("protocol", "tcp"), + device_name=config.get("device_name", ""), + ) + + @staticmethod + def load_from_env() -> 'MooncakeTransferEngineConfig': + """Load config from a file specified in the environment variable.""" + config_file_path = os.getenv('MOONCAKE_CONFIG_PATH') + if config_file_path is None: + raise ValueError( + "The environment variable 'MOONCAKE_CONFIG_PATH' is not set.") + return MooncakeTransferEngineConfig.from_file(config_file_path) + + +class MooncakeTransferEngine: + """Handles the transfer of data using mooncake_vllm_adaptor and ZeroMQ.""" + + def __init__(self, kv_rank: int, local_rank: int): + try: + import mooncake_vllm_adaptor as mva + except ImportError as e: + raise ImportError( + "Please install mooncake by following the instructions at " + "https://github.com/kvcache-ai/Mooncake/blob/main/doc/en/build.md " # noqa: E501 + "to run vLLM with MooncakeConnector.") from e + + self.engine = mva.mooncake_vllm_adaptor() + self.local_rank = local_rank + + try: + self.config = MooncakeTransferEngineConfig.load_from_env() + logger.info("Mooncake Configuration loaded successfully.") + except ValueError as e: + logger.error(e) + raise + except Exception as exc: + logger.error( + "An error occurred while loading the configuration: %s", exc) + raise + prefill_host, base_prefill_port = self.config.prefill_url.split(':') + decode_host, base_decode_port = self.config.decode_url.split(':') + + # Avoid ports conflict when running prefill and decode on the same node + if prefill_host == decode_host and \ + base_prefill_port == base_decode_port: + base_decode_port = str(int(base_decode_port) + 100) + + prefill_port = int(base_prefill_port) + self.local_rank + decode_port = int(base_decode_port) + self.local_rank + self.prefill_url = ':'.join([prefill_host, str(prefill_port)]) + self.decode_url = ':'.join([decode_host, str(decode_port)]) + + self.initialize(self.prefill_url if kv_rank == 0 else self.decode_url, + self.config.metadata_server, self.config.protocol, + self.config.device_name, self.config.metadata_backend) + + self.remote_url = (self.decode_url + if kv_rank == 0 else self.prefill_url) + + # Initialize ZeroMQ context and sockets + self.context = zmq.Context() # type: 
ignore[attr-defined] + self.sender_socket = self.context.socket(zmq.constants.PUSH) + self.receiver_socket = self.context.socket(zmq.constants.PULL) + self.sender_ack = self.context.socket(zmq.constants.PULL) + self.receiver_ack = self.context.socket(zmq.constants.PUSH) + + self.buffer_cleaner = ThreadPoolExecutor(max_workers=1) + self._setup_metadata_sockets(kv_rank, prefill_host, base_prefill_port, + decode_host, base_decode_port) + + def _setup_metadata_sockets(self, kv_rank: int, p_host: str, p_port: str, + d_host: str, d_port: str) -> None: + """Set up ZeroMQ sockets for sending and receiving data.""" + # Offsets < 8 are left for initialization in case tp and pp are enabled + p_rank_offset = int(p_port) + 8 + self.local_rank * 2 + d_rank_offset = int(d_port) + 8 + self.local_rank * 2 + if kv_rank == 0: + self.sender_socket.bind(f"tcp://*:{p_rank_offset + 1}") + self.receiver_socket.connect(f"tcp://{d_host}:{d_rank_offset + 1}") + self.sender_ack.connect(f"tcp://{d_host}:{d_rank_offset + 2}") + self.receiver_ack.bind(f"tcp://*:{p_rank_offset + 2}") + else: + self.receiver_socket.connect(f"tcp://{p_host}:{p_rank_offset + 1}") + self.sender_socket.bind(f"tcp://*:{d_rank_offset + 1}") + self.receiver_ack.bind(f"tcp://*:{d_rank_offset + 2}") + self.sender_ack.connect(f"tcp://{p_host}:{p_rank_offset + 2}") + + def initialize(self, local_hostname: str, metadata_server: str, + protocol: str, device_name: str, + metadata_backend: Union[str, None]) -> None: + """Initialize the mooncake instance.""" + if metadata_backend is None: + self.engine.initialize(local_hostname, metadata_server, protocol, + device_name) + else: + supported_backend = ["etcd", "redis"] + metadata_backend = metadata_backend.lower() + if metadata_backend not in supported_backend: + raise ValueError( + "Mooncake Configuration error. 
`metadata_backend`" + f"should be one of {supported_backend}.") + + self.engine.initializeExt(local_hostname, metadata_server, + protocol, device_name, metadata_backend) + + def allocate_managed_buffer(self, length: int) -> int: + """Allocate a managed buffer of the specified length.""" + ret = self.engine.allocateManagedBuffer(length) + if ret <= 0: + logger.error("Allocation Return Error") + raise Exception("Allocation Return Error") + return ret + + def free_managed_buffer(self, buffer: int, length: int) -> int: + """Free a previously allocated managed buffer.""" + return self.engine.freeManagedBuffer(buffer, length) + + def transfer_sync(self, buffer: int, peer_buffer_address: int, + length: int) -> int: + """Synchronously transfer data to the specified address.""" + ret = self.engine.transferSync(self.remote_url, buffer, + peer_buffer_address, length) + if ret < 0: + logger.error("Transfer Return Error") + raise Exception("Transfer Return Error") + return ret + + def write_bytes_to_buffer(self, buffer: int, user_data: bytes, + length: int) -> int: + """Write bytes to the allocated buffer.""" + return self.engine.writeBytesToBuffer(buffer, user_data, length) + + def read_bytes_from_buffer(self, buffer: int, length: int) -> bytes: + """Read bytes from the allocated buffer.""" + return self.engine.readBytesFromBuffer(buffer, length) + + def wait_for_ack(self, src_ptr: int, length: int) -> None: + """Asynchronously wait for ACK from the receiver.""" + ack = self.sender_ack.recv_pyobj() + if ack != b'ACK': + logger.error("Failed to receive ACK from the receiver") + + self.free_managed_buffer(src_ptr, length) + + def send_bytes(self, user_data: bytes) -> None: + """Send bytes to the remote process.""" + length = len(user_data) + src_ptr = self.allocate_managed_buffer(length) + self.write_bytes_to_buffer(src_ptr, user_data, length) + self.sender_socket.send_pyobj((src_ptr, length)) + self.buffer_cleaner.submit(self.wait_for_ack, src_ptr, length) + + def recv_bytes(self) -> bytes: + """Receive bytes from the remote process.""" + src_ptr, length = self.receiver_socket.recv_pyobj() + dst_ptr = self.allocate_managed_buffer(length) + self.transfer_sync(dst_ptr, src_ptr, length) + ret = self.read_bytes_from_buffer(dst_ptr, length) + + # Buffer cleanup + self.receiver_ack.send_pyobj(b'ACK') + self.free_managed_buffer(dst_ptr, length) + + return ret + + +class MooncakePipe(KVPipeBase): + """MooncakeTransferEngine based Pipe implementation.""" + + def __init__(self, + local_rank: int, + config: KVTransferConfig, + device: Optional[str] = None): + """Initialize the mooncake pipe and set related parameters.""" + self.config = config + self.local_rank = local_rank + self.kv_rank = self.config.kv_rank + if device is None: + self.device = self._select_device(self.config.kv_buffer_device) + else: + self.device = self._select_device(device) + + self.transfer_engine = MooncakeTransferEngine(self.kv_rank, + self.local_rank) + self.transport_thread: Optional[ThreadPoolExecutor] = None + self.none_tensor = torch.tensor([NONE_INT], device=self.device) + + def _select_device(self, device: str) -> torch.device: + """Select available device (CUDA or CPU).""" + logger.info("Selecting device: %s", device) + if device == "cuda": + return torch.device(f"cuda:{self.local_rank}") + else: + return torch.device("cpu") + + def tensor_hash(self, tensor: torch.Tensor) -> int: + """Calculate the hash value of the tensor.""" + return hash(tensor.data_ptr()) + + def _send_impl(self, tensor: torch.Tensor) -> None: + """Implement 
the tensor sending logic.""" + value_bytes = pickle.dumps(tensor) + self.transfer_engine.send_bytes(value_bytes) + + def _recv_impl(self) -> torch.Tensor: + """Implement the tensor receiving logic.""" + data = self.transfer_engine.recv_bytes() + return pickle.loads(data) + + def send_tensor(self, tensor: Optional[torch.Tensor]) -> None: + """Send tensor to the target process.""" + if self.transport_thread is None: + self.transport_thread = ThreadPoolExecutor(max_workers=1) + tensor = tensor if tensor is not None else self.none_tensor + assert (len(tensor.shape) > 0) + self.transport_thread.submit(self._send_impl, tensor) + + def recv_tensor(self) -> Optional[torch.Tensor]: + """Receive tensor from other processes.""" + if self.transport_thread is None: + self.transport_thread = ThreadPoolExecutor(max_workers=1) + tensor = self.transport_thread.submit(self._recv_impl).result() + if tensor.numel() == 1 and tensor.item() == NONE_INT: + return None + else: + return tensor + + def close(self) -> None: + """Cleanup logic when closing the pipe.""" + self.transfer_engine.sender_socket.close() + self.transfer_engine.receiver_socket.close() + self.transfer_engine.sender_ack.close() + self.transfer_engine.receiver_ack.close() + self.transfer_engine.context.term() # Terminate the ZMQ context + logger.info("Closed the transfer engine and cleaned up resources.") diff --git a/vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py b/vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py new file mode 100644 index 0000000000000..98222fa67e492 --- /dev/null +++ b/vllm/distributed/kv_transfer/kv_pipe/pynccl_pipe.py @@ -0,0 +1,276 @@ +""" + This module implements a PyNccl pipe for sending and receiving + Optional[torch.Tensor] between distributed ranks with advanced + communication features. 
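+
+    Illustrative usage (an editorial sketch; assumes `cfg` is a
+    KVTransferConfig shared by two processes with kv_rank 0 and 1 and
+    kv_parallel_size 2):
+
+        pipe = PyNcclPipe(local_rank=0, config=cfg)
+        pipe.send_tensor(torch.ones(16))  # non-blocking
+        t = pipe.recv_tensor()            # blocking; may return None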
+ + Key Features: + - Supports sending and receiving tensors with metadata + - Handles both CUDA and CPU device communications + - Implements a non-blocking tensor transfer mechanism + - Manages buffer size and provides backpressure control + - Supports distributed process groups with configurable parameters +""" + +import threading +import time +from concurrent.futures import ThreadPoolExecutor +from typing import Callable, Dict, Optional, Tuple + +import torch + +from vllm.config import KVTransferConfig +from vllm.distributed.device_communicators.pynccl import PyNcclCommunicator +from vllm.distributed.kv_transfer.kv_pipe.base import KVPipeBase +from vllm.distributed.utils import StatelessProcessGroup +from vllm.logger import init_logger + +logger = init_logger(__name__) + + +class BrokenPipeException(Exception): + + def __init__(self, message): + self.message = message + super().__init__(self.message) + + +Metadata = Dict[str, Optional[torch.Tensor]] + + +class PyNcclPipe(KVPipeBase): + + METADATA_LENGTH = 16 + MAX_TENSOR_DIMENSIONS = 14 + METADATA_DTYPE = torch.int64 + + def __init__(self, + local_rank: int, + config: KVTransferConfig, + device: Optional[str] = None, + port_offset: int = 0): + self.config = config + self.local_rank = local_rank + self.kv_rank = self.config.kv_rank + self.kv_parallel_size = self.config.kv_parallel_size + if device is None: + self.device = self._select_device(self.config.kv_buffer_device) + else: + self.device = self._select_device(device) + + # build distributed connection and send/recv implementation + self.group = StatelessProcessGroup.create( + host=self.config.kv_ip, + port=self.config.kv_port + port_offset, + rank=self.kv_rank, + world_size=self.kv_parallel_size, + ) + # add a barrier to make sure the connection is initiated properly + self.group.barrier() + impl = self._get_device_send_recv_impl(self.group) + self.device_send_func, self.device_recv_func = impl + # set target rank + self.target_rank_for_send = (self.kv_rank + 1) % self.kv_parallel_size + self.target_rank_for_recv = (self.kv_rank - 1) % self.kv_parallel_size + + # transportation-related variables + self.transport_thread: Optional[ThreadPoolExecutor] = None + self.buffer_size = 0 + self.buffer_size_lock = threading.Lock() + self.buffer_size_thresh = self.config.kv_buffer_size + + def _get_device_send_recv_impl( + self, group: StatelessProcessGroup + ) -> Tuple[Callable[[torch.Tensor, int], None], Callable[ + [torch.Tensor, int], None]]: + + send: Callable[[torch.Tensor, int], None] + recv: Callable[[torch.Tensor, int], None] + if self.device.type == "cuda": + # use PyNCCL for send / recv + comm = PyNcclCommunicator(group, device=self.local_rank) + comm.disabled = False + send, recv = comm.send, comm.recv # type: ignore + else: + # This send / recv implementation here is NOT intended to transfer + # KV caches (and should NOT be repurposed to transfer KV caches). + # Currently it is only used to transmit control-plane messages + # for PyNcclBuffer. + send = group.send_obj + + def my_recv(x, src): + x[...] = group.recv_obj(src) + + recv = my_recv + + return send, recv + + def _select_device(self, device: str): + logger.info("Selecting device: %s", device) + if device == "cuda": + return torch.device(f"cuda:{self.local_rank}") + else: + return torch.device("cpu") + + def _make_metadata(self, tensor: Optional[torch.Tensor]) -> Metadata: + """ + Create the metadata as a dictionary based on the input tensor. + + Parameters: + - tensor: The input tensor or None if no tensor is provided. 
+ + Returns: + - metadata: A dictionary with the following keys: + - "dtype": The data type of the tensor or None. + - "shape": The shape of the tensor or None. + """ + if tensor is None: + return {"dtype": None, "shape": None} + else: + return {"dtype": tensor.dtype, "shape": tensor.shape} + + def _prepare_recv_buffer(self, metadata: Metadata) -> torch.Tensor: + """ + Create a buffer to receive the tensor based on the provided metadata. + + Parameters: + - metadata: A dictionary with keys "dtype" and "shape", describing + the tensor's data type and shape. + + Returns: + - buffer: A tensor of the specified type and shape, allocated on + self.device. + """ + return torch.empty(metadata["shape"], + dtype=metadata["dtype"], + device=self.device) + + def _send_metadata(self, metadata: Metadata): + """ + Send the metadata dictionary to the target rank. + + Parameters: + - metadata: A dictionary with keys "dtype" and "shape". + """ + self.group.send_obj(metadata, self.target_rank_for_send) + + def _recv_metadata(self) -> Metadata: + """ + Receive the metadata dictionary from the target rank. + + Returns: + - metadata: A dictionary with keys "dtype" and "shape" describing + the tensor. + """ + return self.group.recv_obj(self.target_rank_for_recv) + + def _send_impl(self, tensor: Optional[torch.Tensor]) -> None: + """ + The actual implementation of sending the tensor and its metadata to the + target rank. + + Parameters: + - tensor: The input tensor to be sent, or None if no tensor is + being sent. + """ + metadata = self._make_metadata(tensor) + self._send_metadata(metadata) + if tensor is not None: + self.device_send_func(tensor.to(self.device), + self.target_rank_for_send) + + def _recv_impl(self) -> Optional[torch.Tensor]: + """ + The actual implementation of receiving a tensor and its metadata from + the target rank. + + Returns: + - buffer: The received tensor, or None if no tensor is received. + """ + metadata = self._recv_metadata() + if metadata["dtype"] is None: + return None + buffer = self._prepare_recv_buffer(metadata) + self.device_recv_func(buffer, self.target_rank_for_recv) + + return buffer + + def send_tensor_wrapper(self, tensor: Optional[torch.Tensor], + tensor_size: int) -> None: + """ + Wrapper for _send_impl to handle exceptions and update buffer size. + """ + try: + self._send_impl(tensor) + + with self.buffer_size_lock: + self.buffer_size -= tensor_size + except Exception as e: + logger.error("[rank%d]: Exception when trying to send %s, msg: %s", + torch.distributed.get_rank(), str(tensor), str(e)) + import traceback + traceback.print_exc() + + def block_if_full(self): + """ + Block the current thread if the buffer size is larger than the + threshold. + """ + while self.buffer_size > self.buffer_size_thresh: + logger.debug("KV cache transfer pipe is full. Waiting...") + time.sleep(0.05) + + def send_tensor(self, tensor: Optional[torch.Tensor]) -> None: + """ + Sends a tensor and its metadata to the destination rank in a + non-blocking way. + + Parameters: + - tensor: The tensor to send, or None if no tensor is being sent. 
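+
+        Example (illustrative): send_tensor(None) transmits only the
+        metadata {"dtype": None, "shape": None}; the peer's recv_tensor()
+        then returns None without any device-side transfer.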
+ """ + if self.transport_thread is None: + self.transport_thread = ThreadPoolExecutor(max_workers=1) + + if tensor is not None: + tensor_size = tensor.element_size() * tensor.numel() + else: + tensor_size = 0 + + self.block_if_full() + + with self.buffer_size_lock: + self.buffer_size += tensor_size + + self.transport_thread.submit(self.send_tensor_wrapper, tensor, + tensor_size) + + def recv_tensor(self) -> Optional[torch.Tensor]: + """ + Receives a tensor and its metadata from the source rank. Blocking call. + + Returns: + - tensor: The received tensor, or None if no tensor is received. + """ + if self.transport_thread is None: + self.transport_thread = ThreadPoolExecutor(max_workers=1) + + future = self.transport_thread.submit(self._recv_impl) + + try: + tensor = future.result() + except Exception as e: + logger.error("Encountering exception in KV receiving thread") + logger.error("%s", e) + logger.error("My device: %s", self.device) + import traceback + traceback.print_exc() + raise e + + return tensor + + def close(self): + """ + Close the pipe and release associated resources. + """ + if hasattr(self, + "transport_thread") and self.transport_thread is not None: + self.transport_thread.shutdown() diff --git a/vllm/distributed/kv_transfer/kv_transfer_agent.py b/vllm/distributed/kv_transfer/kv_transfer_agent.py new file mode 100644 index 0000000000000..9ce97851dc849 --- /dev/null +++ b/vllm/distributed/kv_transfer/kv_transfer_agent.py @@ -0,0 +1,75 @@ +"""A centralized entrypoint to perform distributed KV cache transfer. + +This implementation is a shim wrapper on two APIs exposed by `kv_connector`: +1. `send_kv_caches_and_hidden_states` +2. `recv_kv_caches_and_hidden_states +""" +from typing import TYPE_CHECKING, List, Tuple, Union + +if TYPE_CHECKING: + from vllm.worker.model_runner import ModelInputForGPUWithSamplingMetadata + from vllm.config import VllmConfig + +import torch + +from vllm.distributed.kv_transfer.kv_connector.factory import ( + KVConnectorFactory) +from vllm.logger import init_logger +from vllm.sequence import IntermediateTensors + +logger = init_logger(__name__) + + +class KVTransferAgent: + """ + A class designated for distributed KV transfer + + Target use cases: + 1. Disaggregated prefill + 2. Remote KV cache storage + """ + + def __init__( + self, + rank: int, + local_rank: int, + config: "VllmConfig", + ): + + self.config = config + + if config.kv_transfer_config is None: + raise ValueError("KVTransferConfig is not set in the VllmConfig," + " cannot initialize KVConnector.") + + assert self.config.kv_transfer_config.is_kv_transfer_instance, "KV"\ + "TransferAgent should only be used when kv_connector is set." 
+ + self.connector = KVConnectorFactory.create_connector( + rank, local_rank, config) + + def send_kv_caches_and_hidden_states( + self, + model_executable: torch.nn.Module, + model_input: "ModelInputForGPUWithSamplingMetadata", + kv_caches: List[torch.Tensor], + hidden_or_intermediate_states: Union[torch.Tensor, + IntermediateTensors], + ) -> None: + + self.connector.send_kv_caches_and_hidden_states( + model_executable, model_input, kv_caches, + hidden_or_intermediate_states) + + def close(self) -> None: + self.connector.close() + + def recv_kv_caches_and_hidden_states( + self, model_executable: torch.nn.Module, + model_input: "ModelInputForGPUWithSamplingMetadata", + kv_caches: List[torch.Tensor] + ) -> Tuple[Union[torch.Tensor, IntermediateTensors], bool, + "ModelInputForGPUWithSamplingMetadata"]: + + return self.connector.recv_kv_caches_and_hidden_states( + model_executable, model_input, kv_caches) diff --git a/vllm/distributed/parallel_state.py b/vllm/distributed/parallel_state.py index ccbe00386c5da..5b9236f8c56b6 100644 --- a/vllm/distributed/parallel_state.py +++ b/vllm/distributed/parallel_state.py @@ -27,18 +27,24 @@ from contextlib import contextmanager, nullcontext from dataclasses import dataclass from multiprocessing import shared_memory -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import (TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, + Union) from unittest.mock import patch import torch import torch.distributed from torch.distributed import Backend, ProcessGroup +import vllm.distributed.kv_transfer.kv_transfer_agent as kv_transfer import vllm.envs as envs +from vllm.distributed.utils import StatelessProcessGroup from vllm.logger import init_logger from vllm.platforms import current_platform from vllm.utils import direct_register_custom_op, supports_custom_op +if TYPE_CHECKING: + from vllm.config import VllmConfig + @dataclass class GraphCaptureContext: @@ -904,6 +910,14 @@ def get_pp_group() -> GroupCoordinator: # kept for backward compatibility get_pipeline_model_parallel_group = get_pp_group +_KV_TRANSFER: Optional[kv_transfer.KVTransferAgent] = None + + +def get_kv_transfer_group() -> kv_transfer.KVTransferAgent: + assert _KV_TRANSFER is not None, ( + "disaggregated KV cache transfer parallel group is not initialized") + return _KV_TRANSFER + @contextmanager def graph_capture(): @@ -1052,6 +1066,26 @@ def initialize_model_parallel( group_name="pp") +def ensure_kv_transfer_initialized(vllm_config: "VllmConfig") -> None: + """ + Initialize KV cache transfer parallel group. + """ + + global _KV_TRANSFER + + if vllm_config.kv_transfer_config is None: + return + + if all([ + vllm_config.kv_transfer_config.need_kv_parallel_group, + _KV_TRANSFER is None + ]): + _KV_TRANSFER = kv_transfer.KVTransferAgent( + rank=get_world_group().rank, + local_rank=get_world_group().local_rank, + config=vllm_config) + + def ensure_model_parallel_initialized( tensor_model_parallel_size: int, pipeline_model_parallel_size: int, @@ -1158,25 +1192,31 @@ def cleanup_dist_env_and_memory(shutdown_ray: bool = False): torch.cuda.empty_cache() -def in_the_same_node_as(pg: ProcessGroup, source_rank: int = 0) -> List[bool]: +def in_the_same_node_as(pg: Union[ProcessGroup, StatelessProcessGroup], + source_rank: int = 0) -> List[bool]: """ This is a collective operation that returns if each rank is in the same node as the source rank. It tests if processes are attached to the same memory system (shared access to shared memory). 
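 
     Example (illustrative): with four processes where ranks 0 and 1 share
     one host and ranks 2 and 3 share another, the result for source_rank=0
     is [True, True, False, False].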
""" - assert torch.distributed.get_backend( - pg) != torch.distributed.Backend.NCCL, ( - "in_the_same_node_as should be tested with a non-NCCL group.") - # local rank inside the group - rank = torch.distributed.get_rank(group=pg) - world_size = torch.distributed.get_world_size(group=pg) + if isinstance(pg, ProcessGroup): + assert torch.distributed.get_backend( + pg) != torch.distributed.Backend.NCCL, ( + "in_the_same_node_as should be tested with a non-NCCL group.") + # local rank inside the group + rank = torch.distributed.get_rank(group=pg) + world_size = torch.distributed.get_world_size(group=pg) + + # global ranks of the processes in the group + ranks = torch.distributed.get_process_group_ranks(pg) + else: + rank = pg.rank + world_size = pg.world_size + ranks = list(range(world_size)) # local tensor in each process to store the result is_in_the_same_node = torch.tensor([0] * world_size, dtype=torch.int32) - # global ranks of the processes in the group - ranks = torch.distributed.get_process_group_ranks(pg) - magic_message = b"magic_message" shm = None @@ -1186,17 +1226,21 @@ def in_the_same_node_as(pg: ProcessGroup, source_rank: int = 0) -> List[bool]: # create a shared memory segment shm = shared_memory.SharedMemory(create=True, size=128) shm.buf[:len(magic_message)] = magic_message - torch.distributed.broadcast_object_list([shm.name], - src=ranks[source_rank], - group=pg) + if isinstance(pg, ProcessGroup): + torch.distributed.broadcast_object_list( + [shm.name], src=ranks[source_rank], group=pg) + else: + pg.broadcast_obj(shm.name, src=source_rank) is_in_the_same_node[rank] = 1 else: # try to open the shared memory segment - recv = [None] - torch.distributed.broadcast_object_list(recv, - src=ranks[source_rank], - group=pg) - name = recv[0] + if isinstance(pg, ProcessGroup): + recv = [None] + torch.distributed.broadcast_object_list( + recv, src=ranks[source_rank], group=pg) + name = recv[0] + else: + name = pg.broadcast_obj(None, src=source_rank) # fix to https://stackoverflow.com/q/62748654/9191338 # Python incorrectly tracks shared memory even if it is not # created by the process. The following patch is a workaround. 
@@ -1211,12 +1255,23 @@ def in_the_same_node_as(pg: ProcessGroup, source_rank: int = 0) -> List[bool]: if shm: shm.close() - torch.distributed.barrier(group=pg) + if isinstance(pg, ProcessGroup): + torch.distributed.barrier(group=pg) + else: + pg.barrier() # clean up the shared memory segment with contextlib.suppress(OSError): if rank == source_rank and shm: shm.unlink() - torch.distributed.all_reduce(is_in_the_same_node, group=pg) - return [x == 1 for x in is_in_the_same_node.tolist()] + if isinstance(pg, ProcessGroup): + torch.distributed.all_reduce(is_in_the_same_node, group=pg) + aggregated_data = is_in_the_same_node + else: + aggregated_data = torch.zeros_like(is_in_the_same_node) + for i in range(world_size): + rank_data = pg.broadcast_obj(is_in_the_same_node, src=i) + aggregated_data += rank_data + + return [x == 1 for x in aggregated_data.tolist()] diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 90b4798f17a13..64cc4592c2861 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -9,10 +9,10 @@ import vllm.envs as envs from vllm.config import (CacheConfig, CompilationConfig, ConfigFormat, - DecodingConfig, DeviceConfig, HfOverrides, LoadConfig, - LoadFormat, LoRAConfig, ModelConfig, - ObservabilityConfig, ParallelConfig, PoolerConfig, - PromptAdapterConfig, SchedulerConfig, + DecodingConfig, DeviceConfig, HfOverrides, + KVTransferConfig, LoadConfig, LoadFormat, LoRAConfig, + ModelConfig, ObservabilityConfig, ParallelConfig, + PoolerConfig, PromptAdapterConfig, SchedulerConfig, SpeculativeConfig, TaskOption, TokenizerPoolConfig, VllmConfig) from vllm.executor.executor_base import ExecutorBase @@ -108,12 +108,11 @@ class EngineArgs: # notice. distributed_executor_backend: Optional[Union[str, Type[ExecutorBase]]] = None + # number of P/D disaggregation (or other disaggregation) workers pipeline_parallel_size: int = 1 tensor_parallel_size: int = 1 max_parallel_loading_workers: Optional[int] = None - # NOTE(kzawora): default block size for Gaudi should be 128 - # smaller sizes still work, but very inefficiently - block_size: int = 16 if not current_platform.is_hpu() else 128 + block_size: Optional[int] = None enable_prefix_caching: Optional[bool] = None disable_sliding_window: bool = False use_v2_block_manager: bool = True @@ -121,7 +120,7 @@ class EngineArgs: cpu_offload_gb: float = 0 # GiB gpu_memory_utilization: float = 0.90 max_num_batched_tokens: Optional[int] = None - max_num_seqs: int = 256 + max_num_seqs: Optional[int] = None max_logprobs: int = 20 # Default value for OpenAI Chat Completions API disable_log_stats: bool = False revision: Optional[str] = None @@ -142,6 +141,7 @@ class EngineArgs: tokenizer_pool_extra_config: Optional[Dict[str, Any]] = None limit_mm_per_prompt: Optional[Mapping[str, int]] = None mm_processor_kwargs: Optional[Dict[str, Any]] = None + mm_cache_preprocessor: bool = False enable_lora: bool = False enable_lora_bias: bool = False max_loras: int = 1 @@ -167,7 +167,8 @@ class EngineArgs: scheduler_delay_factor: float = 0.0 enable_chunked_prefill: Optional[bool] = None - guided_decoding_backend: str = 'outlines' + guided_decoding_backend: str = 'xgrammar' + logits_processor_pattern: Optional[str] = None # Speculative decoding configuration. 
speculative_model: Optional[str] = None speculative_model_quantization: Optional[str] = None @@ -194,6 +195,8 @@ class EngineArgs: compilation_config: Optional[CompilationConfig] = None worker_cls: str = "auto" + kv_transfer_config: Optional[KVTransferConfig] = None + def __post_init__(self): if not self.tokenizer: self.tokenizer = self.model @@ -203,15 +206,16 @@ def __post_init__(self): if self.enable_prefix_caching is None: self.enable_prefix_caching = bool(envs.VLLM_USE_V1) + # Override max_num_seqs if it's not set by user. + if self.max_num_seqs is None: + self.max_num_seqs = 256 if not envs.VLLM_USE_V1 else 1024 + # support `EngineArgs(compilation_config={...})` # without having to manually construct a # CompilationConfig object - if isinstance(self.compilation_config, (int)): + if isinstance(self.compilation_config, (int, dict)): self.compilation_config = CompilationConfig.from_cli( str(self.compilation_config)) - elif isinstance(self.compilation_config, (dict)): - self.compilation_config = CompilationConfig.from_cli( - json.dumps(self.compilation_config)) # Setup plugins from vllm.plugins import load_general_plugins @@ -361,14 +365,23 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: parser.add_argument( '--guided-decoding-backend', type=str, - default='outlines', - choices=['outlines', 'lm-format-enforcer'], + default='xgrammar', + choices=['outlines', 'lm-format-enforcer', 'xgrammar'], help='Which engine will be used for guided decoding' ' (JSON schema / regex etc) by default. Currently support ' - 'https://github.com/outlines-dev/outlines and ' + 'https://github.com/outlines-dev/outlines,' + 'https://github.com/mlc-ai/xgrammar, and ' 'https://github.com/noamgat/lm-format-enforcer.' ' Can be overridden per request via guided_decoding_backend' ' parameter.') + parser.add_argument( + '--logits-processor-pattern', + type=nullable_str, + default=None, + help='Optional regex pattern specifying valid logits processor ' + 'qualified names that can be passed with the `logits_processors` ' + 'extra completion argument. Defaults to None, which allows no ' + 'processors.') # Parallel arguments parser.add_argument( '--distributed-executor-backend', @@ -414,17 +427,24 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: choices=[8, 16, 32, 64, 128], help='Token block size for contiguous chunks of ' 'tokens. This is ignored on neuron devices and ' - 'set to max-model-len') + 'set to max-model-len. On CUDA devices, ' + 'only block sizes up to 32 are supported. ' + 'On HPU devices, block size defaults to 128.') - parser.add_argument('--enable-prefix-caching', - action='store_true', - help='Enables automatic prefix caching.') + parser.add_argument( + "--enable-prefix-caching", + action=argparse.BooleanOptionalAction, + default=EngineArgs.enable_prefix_caching, + help="Enables automatic prefix caching. " + "Use --no-enable-prefix-caching to disable explicitly.", + ) parser.add_argument('--disable-sliding-window', action='store_true', help='Disables sliding window, ' 'capping to sliding window size') parser.add_argument('--use-v2-block-manager', action='store_true', + default=True, help='[DEPRECATED] block manager v1 has been ' 'removed and SelfAttnBlockSpaceManager (i.e. ' 'block manager v2) is now the default. ' @@ -468,11 +488,12 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: help='The fraction of GPU memory to be used for the model ' 'executor, which can range from 0 to 1. 
For example, a value of ' '0.5 would imply 50%% GPU memory utilization. If unspecified, ' - 'will use the default value of 0.9. This is a global gpu memory ' - 'utilization limit, for example if 50%% of the gpu memory is ' - 'already used before vLLM starts and --gpu-memory-utilization is ' - 'set to 0.9, then only 40%% of the gpu memory will be allocated ' - 'to the model executor.') + 'will use the default value of 0.9. This is a per-instance ' + 'limit, and only applies to the current vLLM instance.' + 'It does not matter if you have another vLLM instance running ' + 'on the same GPU. For example, if you have two vLLM instances ' + 'running on the same GPU, you can set the GPU memory utilization ' + 'to 0.5 for each instance.') parser.add_argument( '--num-gpu-blocks-override', type=int, @@ -584,6 +605,12 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: type=json.loads, help=('Overrides for the multimodal input mapping/processing, ' 'e.g., image processor. For example: {"num_crops": 4}.')) + parser.add_argument( + '--mm-cache-preprocessor', + action='store_true', + help='If true, then enables caching of the multi-modal ' + 'preprocessor/mapper. Otherwise, the mapper executes each time' + ', and for better performance consider enabling frontend process.') # LoRA related configs parser.add_argument('--enable-lora', @@ -884,7 +911,7 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: '--override-pooler-config', type=PoolerConfig.from_json, default=None, - help="Override or set the pooling method in the embedding model. " + help="Override or set the pooling method for pooling models. " "e.g. {\"pooling_type\": \"mean\", \"normalize\": false}.'") parser.add_argument('--compilation-config', @@ -904,6 +931,12 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: 'compilers, using -O without space is also ' 'supported. -O3 is equivalent to -O 3.') + parser.add_argument('--kv-transfer-config', + type=KVTransferConfig.from_cli, + default=None, + help='The configurations for distributed KV cache ' + 'transfer. 
Should be a JSON string.') + parser.add_argument( '--worker-cls', type=str, @@ -950,9 +983,10 @@ def create_model_config(self) -> ModelConfig: use_async_output_proc=not self.disable_async_output_proc, config_format=self.config_format, mm_processor_kwargs=self.mm_processor_kwargs, + mm_cache_preprocessor=self.mm_cache_preprocessor, override_neuron_config=self.override_neuron_config, override_pooler_config=self.override_pooler_config, - ) + logits_processor_pattern=self.logits_processor_pattern) def create_load_config(self) -> LoadConfig: return LoadConfig( @@ -995,17 +1029,15 @@ def create_engine_config(self, device_config = DeviceConfig(device=self.device) model_config = self.create_model_config() - if model_config.is_multimodal_model: - if self.enable_prefix_caching: - logger.warning( - "--enable-prefix-caching is currently not " - "supported for multimodal models and has been disabled.") + if (model_config.is_multimodal_model and not envs.VLLM_USE_V1 + and self.enable_prefix_caching): + logger.warning("--enable-prefix-caching is currently not " + "supported for multimodal models in v0 and " + "has been disabled.") self.enable_prefix_caching = False cache_config = CacheConfig( - # neuron needs block_size = max_model_len - block_size=self.block_size if self.device != "neuron" else - (self.max_model_len if self.max_model_len is not None else 0), + block_size=self.block_size, gpu_memory_utilization=self.gpu_memory_utilization, swap_space=self.swap_space, cache_dtype=self.kv_cache_dtype, @@ -1038,9 +1070,12 @@ def create_engine_config(self, # long context (> 32K) models. This is to avoid OOM errors in the # initial memory profiling phase. - # Chunked prefill is currently disabled for multimodal models by - # default. - if use_long_context and not model_config.is_multimodal_model: + # For multimodal models, chunked prefill is disabled by default in + # V0, but enabled by design in V1 + if model_config.is_multimodal_model: + self.enable_chunked_prefill = bool(envs.VLLM_USE_V1) + + elif use_long_context: is_gpu = device_config.device_type == "cuda" use_sliding_window = (model_config.get_sliding_window() is not None) @@ -1048,7 +1083,8 @@ def create_engine_config(self, if (is_gpu and not use_sliding_window and not use_spec_decode and not self.enable_lora and not self.enable_prompt_adapter - and model_config.task != "embedding"): + and model_config.runner_type != "pooling" + and not current_platform.is_rocm()): self.enable_chunked_prefill = True logger.warning( "Chunked prefill is enabled by default for models with " @@ -1065,8 +1101,9 @@ def create_engine_config(self, "errors during the initial memory profiling phase, or result " "in low performance due to small KV cache space. 
Consider " "setting --max-model-len to a smaller value.", max_model_len) - elif self.enable_chunked_prefill and model_config.task == "embedding": - msg = "Chunked prefill is not supported for embedding models" + elif (self.enable_chunked_prefill + and model_config.runner_type == "pooling"): + msg = "Chunked prefill is not supported for pooling models" raise ValueError(msg) @@ -1097,7 +1134,7 @@ def create_engine_config(self, disable_logprobs=self.disable_logprobs_during_spec_decoding, ) - # Reminder: Please update docs/source/serving/compatibility_matrix.rst + # Reminder: Please update docs/source/usage/compatibility_matrix.rst # If the feature combo become valid if self.num_scheduler_steps > 1: if speculative_config is not None: @@ -1126,7 +1163,7 @@ def create_engine_config(self, " please file an issue with detailed information.") scheduler_config = SchedulerConfig( - task=model_config.task, + runner_type=model_config.runner_type, max_num_batched_tokens=self.max_num_batched_tokens, max_num_seqs=self.max_num_seqs, max_model_len=model_config.max_model_len, @@ -1197,6 +1234,7 @@ def create_engine_config(self, observability_config=observability_config, prompt_adapter_config=prompt_adapter_config, compilation_config=self.compilation_config, + kv_transfer_config=self.kv_transfer_config, ) if envs.VLLM_USE_V1: @@ -1209,31 +1247,28 @@ def _override_v1_engine_args(self, usage_context: UsageContext) -> None: """ assert envs.VLLM_USE_V1, "V1 is not enabled" - if self.max_num_batched_tokens is None: - # When no user override, set the default values based on the - # usage context. - if usage_context == UsageContext.LLM_CLASS: - logger.warning("Setting max_num_batched_tokens to 8192 " - "for LLM_CLASS usage context.") - self.max_num_seqs = 1024 - self.max_num_batched_tokens = 8192 - elif usage_context == UsageContext.OPENAI_API_SERVER: - logger.warning("Setting max_num_batched_tokens to 2048 " - "for OPENAI_API_SERVER usage context.") - self.max_num_seqs = 1024 - self.max_num_batched_tokens = 2048 + # V1 always uses chunked prefills. + self.enable_chunked_prefill = True + # When no user override, set the default values based on the usage + # context. + # TODO(woosuk): Tune the default values for different hardware. + default_max_num_batched_tokens = { + UsageContext.LLM_CLASS: 8192, + UsageContext.OPENAI_API_SERVER: 2048, + } + if (self.max_num_batched_tokens is None + and usage_context in default_max_num_batched_tokens): + self.max_num_batched_tokens = default_max_num_batched_tokens[ + usage_context] + logger.warning( + "Setting max_num_batched_tokens to %d for %s usage context.", + self.max_num_batched_tokens, usage_context.value) def _override_v1_engine_config(self, engine_config: VllmConfig) -> None: """ Override the EngineConfig's configs based on the usage context for V1. """ assert envs.VLLM_USE_V1, "V1 is not enabled" - # TODO (ywang96): Enable APC by default when VLM supports it. 
- if engine_config.model_config.is_multimodal_model: - logger.warning( - "Prefix caching is currently not supported for multimodal " - "models and has been disabled.") - engine_config.cache_config.enable_prefix_caching = False @dataclass diff --git a/vllm/engine/async_llm_engine.py b/vllm/engine/async_llm_engine.py index 3224577c567f8..f50e20cf70323 100644 --- a/vllm/engine/async_llm_engine.py +++ b/vllm/engine/async_llm_engine.py @@ -1,4 +1,5 @@ import asyncio +import copy import time import weakref from functools import partial @@ -6,6 +7,8 @@ List, Mapping, Optional, Set, Tuple, Type, Union, overload) from weakref import ReferenceType +from typing_extensions import deprecated + import vllm.envs as envs from vllm.config import (DecodingConfig, LoRAConfig, ModelConfig, ParallelConfig, SchedulerConfig, VllmConfig) @@ -25,7 +28,7 @@ from vllm.model_executor.guided_decoding import ( get_guided_decoding_logits_processor) from vllm.model_executor.layers.sampler import SamplerOutput -from vllm.outputs import EmbeddingRequestOutput, RequestOutput +from vllm.outputs import PoolingRequestOutput, RequestOutput from vllm.pooling_params import PoolingParams from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sampling_params import SamplingParams @@ -74,7 +77,7 @@ def _log_task_completion(task: asyncio.Task, class AsyncStream: - """A stream of RequestOutputs or EmbeddingRequestOutputs for a request + """A stream of RequestOutputs or PoolingRequestOutputs for a request that can be iterated over asynchronously via an async generator.""" def __init__(self, request_id: str, cancel: Callable[[str], None]) -> None: @@ -83,7 +86,7 @@ def __init__(self, request_id: str, cancel: Callable[[str], None]) -> None: self._queue: asyncio.Queue = asyncio.Queue() self._finished = False - def put(self, item: Union[RequestOutput, EmbeddingRequestOutput, + def put(self, item: Union[RequestOutput, PoolingRequestOutput, Exception]) -> None: if not self._finished: self._queue.put_nowait(item) @@ -103,7 +106,7 @@ def finished(self) -> bool: async def generator( self - ) -> AsyncGenerator[Union[RequestOutput, EmbeddingRequestOutput], None]: + ) -> AsyncGenerator[Union[RequestOutput, PoolingRequestOutput], None]: try: while True: result = await self._queue.get() @@ -154,7 +157,7 @@ def propagate_exception(self, def process_request_output(self, request_output: Union[RequestOutput, - EmbeddingRequestOutput], + PoolingRequestOutput], *, verbose: bool = False) -> None: """Process a request output from the engine.""" @@ -265,7 +268,7 @@ def __init__(self, *args, **kwargs): async def step_async( self, virtual_engine: int - ) -> List[Union[RequestOutput, EmbeddingRequestOutput]]: + ) -> List[Union[RequestOutput, PoolingRequestOutput]]: """Performs one decoding iteration and returns newly generated results. The workers are ran asynchronously if possible. 
@@ -300,6 +303,9 @@ async def step_async( ctx.seq_group_metadata_list = seq_group_metadata_list ctx.scheduler_outputs = scheduler_outputs + finished_requests_ids = self.scheduler[ + virtual_engine].get_and_reset_finished_requests_ids() + # Maybe switch from async mode to sync mode if not allow_async_output_proc and len(ctx.output_queue) > 0: self._process_model_outputs(ctx=ctx) @@ -311,13 +317,13 @@ async def step_async( self._cache_scheduler_outputs_for_multi_step( virtual_engine, seq_group_metadata_list, scheduler_outputs, allow_async_output_proc) + else: + finished_requests_ids = list() assert seq_group_metadata_list is not None assert scheduler_outputs is not None if not scheduler_outputs.is_empty(): - finished_requests_ids = self.scheduler[ - virtual_engine].get_and_reset_finished_requests_ids() # Check if we have a cached last_output from the previous iteration. # For supporting PP this is probably the best way to pass the @@ -419,7 +425,8 @@ async def get_tokenizer_async(self, return await ( self.get_tokenizer_group().get_lora_tokenizer_async(lora_request)) - @overload # DEPRECATED + @overload + @deprecated("'inputs' will be renamed to 'prompt") async def add_request_async( self, request_id: str, @@ -501,7 +508,8 @@ async def add_request_async( sampling_params=params, tokenizer=await self.get_tokenizer_async(lora_request), default_guided_backend=self.decoding_config. - guided_decoding_backend) + guided_decoding_backend, + model_config=self.model_config) self._add_processed_request( request_id=request_id, @@ -522,22 +530,30 @@ async def check_health_async(self) -> None: async def build_guided_decoding_logits_processor_async( sampling_params: SamplingParams, tokenizer: AnyTokenizer, - default_guided_backend: str) -> SamplingParams: + default_guided_backend: str, + model_config: ModelConfig) -> SamplingParams: """Constructs logits processors based on the guided_decoding, logits_bias, and allowed_token_ids fields in sampling_params. Deletes those fields and adds the constructed logits processors to the logits_processors field. Modifies sampling params in-place and returns the modified sampling params.""" - if (guided_decoding := sampling_params.guided_decoding) is None: + if sampling_params.guided_decoding is None: return sampling_params + # Defensively copy sampling params since guided decoding logits + # processors can have different state for each request + sampling_params = copy.copy(sampling_params) + guided_decoding = sampling_params.guided_decoding + logger.debug("Building guided decoding logits processor. " "Params: %s", guided_decoding) guided_decoding.backend = guided_decoding.backend or default_guided_backend processor = await get_guided_decoding_logits_processor( - guided_params=guided_decoding, tokenizer=tokenizer) + guided_params=guided_decoding, + tokenizer=tokenizer, + model_config=model_config) if processor: if sampling_params.logits_processors is None: @@ -891,7 +907,8 @@ async def run_engine_loop(engine_ref: ReferenceType): # This method does not need to be async, but kept that way # for backwards compatibility. - @overload # DEPRECATED + @overload + @deprecated("'inputs' will be renamed to 'prompt") def add_request( self, request_id: str, @@ -904,7 +921,7 @@ def add_request( prompt_adapter_request: Optional[PromptAdapterRequest] = None, priority: int = 0, ) -> Coroutine[None, None, AsyncGenerator[Union[ - RequestOutput, EmbeddingRequestOutput], None]]: + RequestOutput, PoolingRequestOutput], None]]: ... 
@overload @@ -919,7 +936,7 @@ def add_request( prompt_adapter_request: Optional[PromptAdapterRequest] = None, priority: int = 0, ) -> Coroutine[None, None, AsyncGenerator[Union[ - RequestOutput, EmbeddingRequestOutput], None]]: + RequestOutput, PoolingRequestOutput], None]]: ... @deprecate_kwargs( @@ -938,7 +955,7 @@ async def add_request( priority: int = 0, *, inputs: Optional[PromptType] = None, # DEPRECATED - ) -> AsyncGenerator[Union[RequestOutput, EmbeddingRequestOutput], None]: + ) -> AsyncGenerator[Union[RequestOutput, PoolingRequestOutput], None]: if inputs is not None: prompt = inputs assert prompt is not None and params is not None @@ -1048,16 +1065,20 @@ async def generate( >>> # Process and return the final output >>> ... """ - async for output in await self.add_request( - request_id, - prompt, - sampling_params, - lora_request=lora_request, - trace_headers=trace_headers, - prompt_adapter_request=prompt_adapter_request, - priority=priority, - ): - yield LLMEngine.validate_output(output, RequestOutput) + try: + async for output in await self.add_request( + request_id, + prompt, + sampling_params, + lora_request=lora_request, + trace_headers=trace_headers, + prompt_adapter_request=prompt_adapter_request, + priority=priority, + ): + yield LLMEngine.validate_output(output, RequestOutput) + except asyncio.CancelledError: + await self.abort(request_id) + raise async def encode( self, @@ -1067,8 +1088,8 @@ async def encode( lora_request: Optional[LoRARequest] = None, trace_headers: Optional[Mapping[str, str]] = None, priority: int = 0, - ) -> AsyncGenerator[EmbeddingRequestOutput, None]: - """Generate outputs for a request from an embedding model. + ) -> AsyncGenerator[PoolingRequestOutput, None]: + """Generate outputs for a request from a pooling model. Generate outputs for a request. This method is a coroutine. It adds the request into the waiting queue of the LLMEngine and streams the outputs @@ -1085,7 +1106,7 @@ async def encode( Only applicable with priority scheduling. Yields: - The output `EmbeddingRequestOutput` objects from the LLMEngine + The output `PoolingRequestOutput` objects from the LLMEngine for the request. Details: @@ -1130,15 +1151,19 @@ async def encode( >>> # Process and return the final output >>> ... """ - async for output in await self.add_request( - request_id, - prompt, - pooling_params, - lora_request=lora_request, - trace_headers=trace_headers, - priority=priority, - ): - yield LLMEngine.validate_output(output, EmbeddingRequestOutput) + try: + async for output in await self.add_request( + request_id, + prompt, + pooling_params, + lora_request=lora_request, + trace_headers=trace_headers, + priority=priority, + ): + yield LLMEngine.validate_output(output, PoolingRequestOutput) + except asyncio.CancelledError: + await self.abort(request_id) + raise async def abort(self, request_id: str) -> None: """Abort a request. 
diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index a4975cece9a81..dc2d77d6927cd 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -1,3 +1,4 @@ +import copy import time from collections import Counter as collectionsCounter from collections import deque @@ -10,7 +11,7 @@ from typing import Set, Type, Union, cast, overload import torch -from typing_extensions import TypeVar +from typing_extensions import TypeVar, deprecated import vllm.envs as envs from vllm.config import (DecodingConfig, LoRAConfig, ModelConfig, @@ -40,16 +41,15 @@ get_local_guided_decoding_logits_processor) from vllm.model_executor.layers.sampler import SamplerOutput from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalRegistry -from vllm.outputs import (EmbeddingRequestOutput, RequestOutput, +from vllm.outputs import (PoolingRequestOutput, RequestOutput, RequestOutputFactory) from vllm.pooling_params import PoolingParams from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sampling_params import RequestOutputKind, SamplingParams -from vllm.sequence import (EmbeddingSequenceGroupOutput, ExecuteModelRequest, - ParallelSampleSequenceGroup, Sequence, - SequenceGroup, SequenceGroupBase, - SequenceGroupMetadata, SequenceGroupOutput, - SequenceStatus) +from vllm.sequence import (ExecuteModelRequest, ParallelSampleSequenceGroup, + PoolingSequenceGroupOutput, Sequence, SequenceGroup, + SequenceGroupBase, SequenceGroupMetadata, + SequenceGroupOutput, SequenceStatus) from vllm.tracing import (SpanAttributes, SpanKind, extract_trace_context, init_tracer) from vllm.transformers_utils.config import try_get_generation_config @@ -80,7 +80,7 @@ def _load_generation_config_dict(model_config: ModelConfig) -> Dict[str, Any]: _G = TypeVar("_G", bound=BaseTokenizerGroup, default=BaseTokenizerGroup) -_O = TypeVar("_O", RequestOutput, EmbeddingRequestOutput) +_O = TypeVar("_O", RequestOutput, PoolingRequestOutput) @dataclass @@ -112,7 +112,7 @@ class SchedulerContext: def __init__(self, multi_step_stream_outputs: bool = False): self.output_queue: Deque[OutputData] = deque() self.request_outputs: List[Union[RequestOutput, - EmbeddingRequestOutput]] = [] + PoolingRequestOutput]] = [] self.seq_group_metadata_list: Optional[ List[SequenceGroupMetadata]] = None self.scheduler_outputs: Optional[SchedulerOutputs] = None @@ -231,6 +231,7 @@ def __init__( use_cached_outputs: bool = False, ) -> None: + self.vllm_config = vllm_config self.model_config = vllm_config.model_config self.cache_config = vllm_config.cache_config self.lora_config = vllm_config.lora_config @@ -246,60 +247,12 @@ def __init__( ) logger.info( - "Initializing an LLM engine (v%s) with config: " - "model=%r, speculative_config=%r, tokenizer=%r, " - "skip_tokenizer_init=%s, tokenizer_mode=%s, revision=%s, " - "override_neuron_config=%s, tokenizer_revision=%s, " - "trust_remote_code=%s, dtype=%s, max_seq_len=%d, " - "download_dir=%r, load_format=%s, tensor_parallel_size=%d, " - "pipeline_parallel_size=%d, " - "disable_custom_all_reduce=%s, quantization=%s, " - "enforce_eager=%s, kv_cache_dtype=%s, " - "quantization_param_path=%s, device_config=%s, " - "decoding_config=%r, observability_config=%r, " - "seed=%d, served_model_name=%s, " - "num_scheduler_steps=%d, chunked_prefill_enabled=%s " - "multi_step_stream_outputs=%s, enable_prefix_caching=%s, " - "use_async_output_proc=%s, use_cached_outputs=%s, " - "mm_processor_kwargs=%s, pooler_config=%r," - "compilation_config=%r", + "Initializing an LLM engine (v%s) with config: 
%s, " + "use_cached_outputs=%s, ", VLLM_VERSION, - self.model_config.model, - self.speculative_config, - self.model_config.tokenizer, - self.model_config.skip_tokenizer_init, - self.model_config.tokenizer_mode, - self.model_config.revision, - self.model_config.override_neuron_config, - self.model_config.tokenizer_revision, - self.model_config.trust_remote_code, - self.model_config.dtype, - self.model_config.max_model_len, - self.load_config.download_dir, - self.load_config.load_format, - self.parallel_config.tensor_parallel_size, - self.parallel_config.pipeline_parallel_size, - self.parallel_config.disable_custom_all_reduce, - self.model_config.quantization, - self.model_config.enforce_eager, - self.cache_config.cache_dtype, - self.model_config.quantization_param_path, - self.device_config.device, - self.decoding_config, - self.observability_config, - self.model_config.seed, - self.model_config.served_model_name, - self.scheduler_config.num_scheduler_steps, - self.scheduler_config.chunked_prefill_enabled, - self.scheduler_config.multi_step_stream_outputs, - self.cache_config.enable_prefix_caching, - self.model_config.use_async_output_proc, + vllm_config, use_cached_outputs, - self.model_config.mm_processor_kwargs, - self.model_config.pooler_config, - vllm_config.compilation_config, ) - # TODO(woosuk): Print more configs in debug mode. self.log_stats = log_stats self.use_cached_outputs = use_cached_outputs @@ -334,7 +287,7 @@ def get_tokenizer_for_seq(sequence: Sequence) -> AnyTokenizer: self.model_executor = executor_class(vllm_config=vllm_config, ) - if self.model_config.task != "embedding": + if self.model_config.runner_type != "pooling": self._initialize_kv_caches() # If usage stat is enabled, collect relevant info. @@ -432,13 +385,14 @@ def get_tokenizer_for_seq(sequence: Sequence) -> AnyTokenizer: self.stat_loggers = { "logging": LoggingStatLogger( - local_interval=_LOCAL_LOGGING_INTERVAL_SEC), + local_interval=_LOCAL_LOGGING_INTERVAL_SEC, + vllm_config=vllm_config), "prometheus": PrometheusStatLogger( local_interval=_LOCAL_LOGGING_INTERVAL_SEC, labels=dict( model_name=self.model_config.served_model_name), - max_model_len=self.model_config.max_model_len), + vllm_config=vllm_config), } self.stat_loggers["prometheus"].info("cache_config", self.cache_config) @@ -472,6 +426,7 @@ def _initialize_kv_caches(self) -> None: The workers will determine the number of blocks in both the GPU cache and the swap CPU cache. 
""" + start = time.time() num_gpu_blocks, num_cpu_blocks = ( self.model_executor.determine_num_available_blocks()) @@ -487,6 +442,9 @@ def _initialize_kv_caches(self) -> None: self.cache_config.num_cpu_blocks = num_cpu_blocks self.model_executor.initialize_cache(num_gpu_blocks, num_cpu_blocks) + elapsed = time.time() - start + logger.info(("init engine (profile, create kv cache, " + "warmup model) took %.2f seconds"), elapsed) @classmethod def _get_executor_cls(cls, @@ -619,7 +577,7 @@ def _init_tokenizer(self) -> BaseTokenizerGroup: model_config=self.model_config, scheduler_config=self.scheduler_config, parallel_config=self.parallel_config, - enable_lora=bool(self.lora_config)) + lora_config=self.lora_config) def _verify_args(self) -> None: self.model_config.verify_with_parallel_config(self.parallel_config) @@ -719,12 +677,11 @@ def _add_processed_request( def stop_remote_worker_execution_loop(self) -> None: self.model_executor.stop_remote_worker_execution_loop() - @overload # DEPRECATED + @overload def add_request( self, request_id: str, - *, - inputs: PromptType, + prompt: PromptType, params: Union[SamplingParams, PoolingParams], arrival_time: Optional[float] = None, lora_request: Optional[LoRARequest] = None, @@ -735,10 +692,12 @@ def add_request( ... @overload + @deprecated("'inputs' will be renamed to 'prompt") def add_request( self, request_id: str, - prompt: PromptType, + *, + inputs: PromptType, params: Union[SamplingParams, PoolingParams], arrival_time: Optional[float] = None, lora_request: Optional[LoRARequest] = None, @@ -1006,9 +965,9 @@ def has_unfinished_requests_for_virtual_engine( @staticmethod def _process_sequence_group_outputs( seq_group: SequenceGroup, - outputs: List[EmbeddingSequenceGroupOutput], + outputs: List[PoolingSequenceGroupOutput], ) -> None: - seq_group.embeddings = outputs[0].embeddings + seq_group.pooled_data = outputs[0].data for seq in seq_group.get_seqs(): seq.status = SequenceStatus.FINISHED_STOPPED @@ -1023,9 +982,9 @@ def _update_num_computed_tokens_for_multi_step_prefill( This function updates num_computed_tokens for prompt sequences when Multi-Step is enabled. - seq_group: SequenceGroup to update the num_computed_tokens for. + seq_group: SequenceGroup to update the num_computed_tokens for. seq_group_meta: Metadata of the given SequenceGroup. - is_first_step_output: Optional[bool] - + is_first_step_output: Optional[bool] - When available, is_first_step_output indicates if the appended output token is the output of the first-step in multi-step. A value of None indicates that outputs from all steps in @@ -1163,7 +1122,7 @@ def _process_model_outputs(self, seq_group.metrics.model_execute_time = ( o.model_execute_time) - if self.model_config.task == "embedding": + if self.model_config.runner_type == "pooling": self._process_sequence_group_outputs(seq_group, output) else: self.output_processor.process_prompt_logprob(seq_group, output) @@ -1314,7 +1273,7 @@ def _advance_to_next_step( else: seq.append_token_id(sample.output_token, sample.logprobs) - def step(self) -> List[Union[RequestOutput, EmbeddingRequestOutput]]: + def step(self) -> List[Union[RequestOutput, PoolingRequestOutput]]: """Performs one decoding iteration and returns newly generated results. .. 
figure:: https://i.imgur.com/sv2HssD.png @@ -1398,6 +1357,9 @@ def step(self) -> List[Union[RequestOutput, EmbeddingRequestOutput]]: ctx.seq_group_metadata_list = seq_group_metadata_list ctx.scheduler_outputs = scheduler_outputs + finished_requests_ids = self.scheduler[ + virtual_engine].get_and_reset_finished_requests_ids() + # Maybe switch from async mode to sync mode if not allow_async_output_proc and len(ctx.output_queue) > 0: self._process_model_outputs(ctx=ctx) @@ -1409,13 +1371,13 @@ def step(self) -> List[Union[RequestOutput, EmbeddingRequestOutput]]: self._cache_scheduler_outputs_for_multi_step( virtual_engine, seq_group_metadata_list, scheduler_outputs, allow_async_output_proc) + else: + finished_requests_ids = list() assert seq_group_metadata_list is not None assert scheduler_outputs is not None if not scheduler_outputs.is_empty(): - finished_requests_ids = self.scheduler[ - virtual_engine].get_and_reset_finished_requests_ids() # Check if we have a cached last_output from the previous iteration. # For supporting PP this is probably the best way to pass the @@ -1821,8 +1783,8 @@ def _get_stats(self, num_prompt_tokens_iter) # Spec decode, if enabled, emits specialized metrics from the worker in # sampler output. - if model_output and (model_output[0].spec_decode_worker_metrics - is not None): + if model_output and isinstance(model_output[0], SamplerOutput) and ( + model_output[0].spec_decode_worker_metrics is not None): spec_decode_metrics = model_output[0].spec_decode_worker_metrics else: spec_decode_metrics = None @@ -2032,7 +1994,11 @@ def _build_logits_processors( logits_processors = [] - if (guided_decoding := sampling_params.guided_decoding) is not None: + if sampling_params.guided_decoding is not None: + # Defensively copy sampling params since guided decoding logits + # processors can have different state for each request + sampling_params = copy.copy(sampling_params) + guided_decoding = sampling_params.guided_decoding logger.debug( "Building guided decoding logits processor in " @@ -2043,7 +2009,9 @@ def _build_logits_processors( self.decoding_config.guided_decoding_backend processor = get_local_guided_decoding_logits_processor( - guided_params=guided_decoding, tokenizer=tokenizer) + guided_params=guided_decoding, + tokenizer=tokenizer, + model_config=self.model_config) if processor: logits_processors.append(processor) diff --git a/vllm/engine/metrics.py b/vllm/engine/metrics.py index 5bfd6a9f4b386..c8aec8dd3afa3 100644 --- a/vllm/engine/metrics.py +++ b/vllm/engine/metrics.py @@ -6,6 +6,7 @@ import numpy as np import prometheus_client +from vllm.config import VllmConfig from vllm.engine.metrics_types import (StatLoggerBase, Stats, SupportsMetricsInfo) from vllm.executor.ray_utils import ray @@ -44,10 +45,12 @@ class Metrics: _counter_cls = prometheus_client.Counter _histogram_cls = prometheus_client.Histogram - def __init__(self, labelnames: List[str], max_model_len: int): + def __init__(self, labelnames: List[str], vllm_config: VllmConfig): # Unregister any existing vLLM collectors (for CI/CD) self._unregister_vllm_metrics() + max_model_len = vllm_config.model_config.max_model_len + # System stats # Scheduler State self.gauge_scheduler_running = self._gauge_cls( @@ -115,11 +118,15 @@ def __init__(self, labelnames: List[str], max_model_len: int): name="vllm:tokens_total", documentation="Number of prefill plus generation tokens processed.", labelnames=labelnames) + buckets = [1, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8096] + if not 
vllm_config.model_config.enforce_eager: + buckets = vllm_config.compilation_config.capture_sizes.copy() + buckets.sort() self.histogram_iteration_tokens = self._histogram_cls( name="vllm:iteration_tokens_total", documentation="Histogram of number of tokens per engine_step.", labelnames=labelnames, - buckets=[1, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8096]) + buckets=buckets) self.histogram_time_to_first_token = self._histogram_cls( name="vllm:time_to_first_token_seconds", documentation="Histogram of time to first token in seconds.", @@ -361,10 +368,10 @@ class RayMetrics(Metrics): _histogram_cls: Type[prometheus_client.Histogram] = cast( Type[prometheus_client.Histogram], _RayHistogramWrapper) - def __init__(self, labelnames: List[str], max_model_len: int): + def __init__(self, labelnames: List[str], vllm_config: VllmConfig): if ray_metrics is None: raise ImportError("RayMetrics requires Ray to be installed.") - super().__init__(labelnames, max_model_len) + super().__init__(labelnames, vllm_config) def _unregister_vllm_metrics(self) -> None: # No-op on purpose @@ -421,8 +428,8 @@ def get_throughput(tracked_stats: List[int], now: float, class LoggingStatLogger(StatLoggerBase): """LoggingStatLogger is used in LLMEngine to log to Stdout.""" - def __init__(self, *args, **kwargs) -> None: - super().__init__(*args, **kwargs) + def __init__(self, local_interval: float, vllm_config: VllmConfig) -> None: + super().__init__(local_interval, vllm_config) self.last_prompt_throughput: Optional[float] = None self.last_generation_throughput: Optional[float] = None @@ -473,13 +480,13 @@ def log(self, stats: Stats) -> None: ) if (stats.cpu_prefix_cache_hit_rate >= 0 or stats.gpu_prefix_cache_hit_rate >= 0): - logger.info( + log_fn( "Prefix cache hit rate: GPU: %.2f%%, CPU: %.2f%%", stats.gpu_prefix_cache_hit_rate * 100, stats.cpu_prefix_cache_hit_rate * 100, ) if self.spec_decode_metrics is not None: - logger.info( + log_fn( self._format_spec_decode_metrics_str( self.spec_decode_metrics)) @@ -515,12 +522,12 @@ class PrometheusStatLogger(StatLoggerBase): _gauge_cls = prometheus_client.Gauge def __init__(self, local_interval: float, labels: Dict[str, str], - max_model_len: int) -> None: - super().__init__(local_interval) + vllm_config: VllmConfig) -> None: + super().__init__(local_interval, vllm_config) # Prometheus metrics self.labels = labels self.metrics = self._metrics_cls(labelnames=list(labels.keys()), - max_model_len=max_model_len) + vllm_config=vllm_config) def _log_gauge(self, gauge, data: Union[int, float]) -> None: # Convenience function for logging to gauge. 
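The histogram change above derives the iteration-token buckets from the CUDA-graph capture sizes instead of a fixed list. The selection logic, restated as a standalone sketch (assuming capture_sizes is a plain list of ints, as in the diff):

    from typing import List

    # Fallback buckets for eager mode, mirroring the diff's fixed list.
    DEFAULT_BUCKETS = [1, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8096]

    def iteration_token_buckets(enforce_eager: bool,
                                capture_sizes: List[int]) -> List[int]:
        """Pick bucket boundaries for vllm:iteration_tokens_total."""
        if enforce_eager:
            return list(DEFAULT_BUCKETS)
        # With CUDA graphs enabled, batch sizes snap to the captured sizes,
        # so those sizes make natural bucket boundaries.
        buckets = capture_sizes.copy()
        buckets.sort()
        return buckets

    assert iteration_token_buckets(False, [8, 1, 4, 2]) == [1, 2, 4, 8]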
@@ -599,9 +606,9 @@ def _log_prometheus(self, stats: Stats) -> None: stats.time_queue_requests) self._log_histogram(self.metrics.histogram_inference_time_request, stats.time_inference_requests) - self._log_histogram(self.metrics.histogram_decode_time_request, - stats.time_prefill_requests) self._log_histogram(self.metrics.histogram_prefill_time_request, + stats.time_prefill_requests) + self._log_histogram(self.metrics.histogram_decode_time_request, stats.time_decode_requests) self._log_histogram(self.metrics.histogram_time_in_queue_request, stats.time_in_queue_requests) diff --git a/vllm/engine/metrics_types.py b/vllm/engine/metrics_types.py index 5f7ec3bbcb269..5c7a430d11c5a 100644 --- a/vllm/engine/metrics_types.py +++ b/vllm/engine/metrics_types.py @@ -16,6 +16,7 @@ from dataclasses import dataclass from typing import Dict, List, Optional, Protocol +from vllm.config import VllmConfig from vllm.spec_decode.metrics import SpecDecodeWorkerMetrics @@ -77,7 +78,7 @@ def metrics_info(self) -> Dict[str, str]: class StatLoggerBase(ABC): """Base class for StatLogger.""" - def __init__(self, local_interval: float) -> None: + def __init__(self, local_interval: float, vllm_config: VllmConfig) -> None: # Tracked stats over current local logging interval. self.num_prompt_tokens: List[int] = [] self.num_generation_tokens: List[int] = [] diff --git a/vllm/engine/multiprocessing/__init__.py b/vllm/engine/multiprocessing/__init__.py index 34c161e9395ae..420f540d0b5f4 100644 --- a/vllm/engine/multiprocessing/__init__.py +++ b/vllm/engine/multiprocessing/__init__.py @@ -2,6 +2,8 @@ from enum import Enum from typing import List, Mapping, Optional, Union, overload +from typing_extensions import deprecated + from vllm import PoolingParams from vllm.inputs import PromptType from vllm.lora.request import LoRARequest @@ -32,11 +34,10 @@ class RPCProcessRequest: prompt_adapter_request: Optional[PromptAdapterRequest] = None priority: int = 0 - @overload # DEPRECATED + @overload def __init__( self, - *, - inputs: PromptType, + prompt: PromptType, params: Union[SamplingParams, PoolingParams], request_id: str, lora_request: Optional[LoRARequest] = None, @@ -47,9 +48,11 @@ def __init__( ... 
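The _log_prometheus hunk above corrects a transposition in which prefill times were recorded into the decode histogram and vice versa. One defensive pattern against this class of bug is to key metric/value pairs by a shared stage name instead of relying on positional pairing; a small illustrative sketch (not vLLM's actual metrics API):

    from typing import Dict, List

    def log_stage_histograms(histograms: Dict[str, List[float]],
                             stage_times: Dict[str, List[float]]) -> None:
        # Both sides are indexed by the same stage key, so a prefill/decode
        # swap cannot happen by argument order.
        for stage in ("prefill", "decode"):
            histograms[stage].extend(stage_times[stage])

    hists: Dict[str, List[float]] = {"prefill": [], "decode": []}
    log_stage_histograms(hists, {"prefill": [0.120], "decode": [0.030]})
    assert hists["prefill"] == [0.120] and hists["decode"] == [0.030]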
@overload + @deprecated("'inputs' will be renamed to 'prompt'") def __init__( self, - prompt: PromptType, + *, + inputs: PromptType, params: Union[SamplingParams, PoolingParams], request_id: str, lora_request: Optional[LoRARequest] = None, diff --git a/vllm/engine/multiprocessing/client.py b/vllm/engine/multiprocessing/client.py index fe21c58c775fe..0a046c71e86e8 100644 --- a/vllm/engine/multiprocessing/client.py +++ b/vllm/engine/multiprocessing/client.py @@ -9,6 +9,7 @@ import psutil import zmq import zmq.asyncio +from typing_extensions import deprecated from zmq import Frame # type: ignore[attr-defined] from zmq.asyncio import Socket @@ -35,7 +36,7 @@ from vllm.logger import init_logger from vllm.lora.request import LoRARequest from vllm.model_executor.layers.sampler import SamplerOutput -from vllm.outputs import EmbeddingRequestOutput, RequestOutput +from vllm.outputs import PoolingRequestOutput, RequestOutput from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sampling_params import SamplingParams from vllm.transformers_utils.tokenizer_group import init_tokenizer_from_configs @@ -93,8 +94,7 @@ def __init__(self, ipc_path: str, engine_config: VllmConfig, model_config=self.model_config, scheduler_config=engine_config.scheduler_config, parallel_config=engine_config.parallel_config, - enable_lora=bool(engine_config.lora_config), - ) + lora_config=engine_config.lora_config) self.input_preprocessor = InputPreprocessor(self.model_config, self.tokenizer) @@ -414,11 +414,10 @@ def errored(self) -> bool: def dead_error(self) -> BaseException: return ENGINE_DEAD_ERROR(self._errored_with) - @overload # DEPRECATED + @overload def generate( self, - *, - inputs: PromptType, + prompt: PromptType, sampling_params: SamplingParams, request_id: str, lora_request: Optional[LoRARequest] = None, @@ -429,9 +428,11 @@ def generate( ... @overload + @deprecated("'inputs' will be renamed to 'prompt'") def generate( self, - prompt: PromptType, + *, + inputs: PromptType, sampling_params: SamplingParams, request_id: str, lora_request: Optional[LoRARequest] = None, @@ -472,8 +473,8 @@ def generate( trace_headers: OpenTelemetry trace headers. prompt_adapter_request: Prompt Adapter request to use for generation, if any. - priority: Priority of the request (lower means earlier handling). - Any priority other than 0 will lead to an error if the + priority: Priority of the request (lower means earlier handling). + Any priority other than 0 will lead to an error if the scheduling policy is not "priority". """ if inputs is not None: @@ -485,29 +486,30 @@ def generate( lora_request, trace_headers, prompt_adapter_request, priority) - @overload # DEPRECATED + @overload def encode( self, - *, - inputs: PromptType, + prompt: PromptType, pooling_params: PoolingParams, request_id: str, lora_request: Optional[LoRARequest] = None, trace_headers: Optional[Mapping[str, str]] = None, priority: int = 0, - ) -> AsyncGenerator[EmbeddingRequestOutput, None]: + ) -> AsyncGenerator[PoolingRequestOutput, None]: ... @overload + @deprecated("'inputs' will be renamed to 'prompt'") def encode( self, - prompt: PromptType, + *, + inputs: PromptType, pooling_params: PoolingParams, request_id: str, lora_request: Optional[LoRARequest] = None, trace_headers: Optional[Mapping[str, str]] = None, priority: int = 0, - ) -> AsyncGenerator[EmbeddingRequestOutput, None]: + ) -> AsyncGenerator[PoolingRequestOutput, None]: ...
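The overload pairs above rely on typing_extensions.deprecated (PEP 702) so that static type checkers warn at call sites still using the keyword-only inputs= form, while runtime behavior is unchanged. A self-contained sketch of the same pattern with a toy function:

    from typing import Optional, overload

    from typing_extensions import deprecated

    @overload
    def submit(prompt: str, request_id: str) -> str: ...

    @overload
    @deprecated("'inputs' will be renamed to 'prompt'")
    def submit(*, inputs: str, request_id: str) -> str: ...

    def submit(prompt: Optional[str] = None,
               request_id: str = "",
               *,
               inputs: Optional[str] = None) -> str:
        # The implementation still accepts the old keyword; only static
        # checkers (and PEP 702-aware IDEs) flag the deprecated overload.
        if inputs is not None:
            prompt = inputs
        assert prompt is not None
        return f"{request_id}: {prompt}"

    print(submit("hello", request_id="req-1"))         # preferred form
    print(submit(inputs="hello", request_id="req-2"))  # warned by type checkers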
@deprecate_kwargs( @@ -524,8 +526,8 @@ def encode( priority: int = 0, *, inputs: Optional[PromptType] = None # DEPRECATED - ) -> AsyncGenerator[EmbeddingRequestOutput, None]: - """Generate outputs for a request from an embedding model. + ) -> AsyncGenerator[PoolingRequestOutput, None]: + """Generate outputs for a request from a pooling model. Generate outputs for a request. This method is a coroutine. It adds the request into the waiting queue of the LLMEngine and streams the outputs @@ -540,7 +542,7 @@ def encode( trace_headers: OpenTelemetry trace headers. Yields: - The output `EmbeddingRequestOutput` objects from the LLMEngine + The output `PoolingRequestOutput` objects from the LLMEngine for the request. """ if inputs is not None: @@ -549,7 +551,7 @@ def encode( and request_id is not None) return cast( - AsyncGenerator[EmbeddingRequestOutput, None], + AsyncGenerator[PoolingRequestOutput, None], self._process_request(prompt, pooling_params, request_id, @@ -567,13 +569,17 @@ async def _process_request( prompt_adapter_request: Optional[PromptAdapterRequest] = None, priority: int = 0, ) -> Union[AsyncGenerator[RequestOutput, None], AsyncGenerator[ - EmbeddingRequestOutput, None]]: + PoolingRequestOutput, None]]: """Send an RPCGenerateRequest to the RPCServer and stream responses.""" # If already dead, error out. if self._errored_with is not None: raise ENGINE_DEAD_ERROR(self._errored_with) + # Ensure the request id is unique among running requests + if request_id in self.output_queues: + raise ValueError(f"Request {request_id} already exists") + # Constructing guided decoding logits processors is expensive, so we do # it here to avoid contending with cpu resources and the GIL on the # backend process. @@ -586,6 +592,7 @@ async def _process_request( default_guided_backend=(self.decoding_config.guided_decoding_backend if self.decoding_config else DecodingConfig.guided_decoding_backend), + model_config=self.model_config ) # 1) Create output queue for this requests. diff --git a/vllm/engine/output_processor/multi_step.py b/vllm/engine/output_processor/multi_step.py index 7a6ebb430541f..a9b638ed02a1e 100644 --- a/vllm/engine/output_processor/multi_step.py +++ b/vllm/engine/output_processor/multi_step.py @@ -65,7 +65,7 @@ def process_prompt_logprob(self, seq_group: SequenceGroup, @staticmethod @functools.lru_cache def _log_prompt_logprob_unsupported_warning_once(): - # Reminder: Please update docs/source/serving/compatibility_matrix.rst + # Reminder: Please update docs/source/usage/compatibility_matrix.rst # If the feature combo become valid logger.warning( "Prompt logprob is not supported by multi step workers. 
" diff --git a/vllm/engine/protocol.py b/vllm/engine/protocol.py index e15395d75c91f..a066836b92708 100644 --- a/vllm/engine/protocol.py +++ b/vllm/engine/protocol.py @@ -11,8 +11,7 @@ from vllm.logger import init_logger from vllm.lora.request import LoRARequest from vllm.model_executor.layers.sampler import SamplerOutput -from vllm.outputs import (CompletionOutput, EmbeddingRequestOutput, - RequestOutput) +from vllm.outputs import CompletionOutput, PoolingRequestOutput, RequestOutput from vllm.pooling_params import PoolingParams from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sampling_params import BeamSearchParams, SamplingParams @@ -209,8 +208,8 @@ def encode( lora_request: Optional[LoRARequest] = None, trace_headers: Optional[Mapping[str, str]] = None, priority: int = 0, - ) -> AsyncGenerator[EmbeddingRequestOutput, None]: - """Generate outputs for a request from an embedding model.""" + ) -> AsyncGenerator[PoolingRequestOutput, None]: + """Generate outputs for a request from a pooling model.""" ... @abstractmethod diff --git a/vllm/entrypoints/api_server.py b/vllm/entrypoints/api_server.py index ea3c93f733038..95da1c6e7b9bf 100644 --- a/vllm/entrypoints/api_server.py +++ b/vllm/entrypoints/api_server.py @@ -17,11 +17,11 @@ from vllm.engine.arg_utils import AsyncEngineArgs from vllm.engine.async_llm_engine import AsyncLLMEngine from vllm.entrypoints.launcher import serve_http +from vllm.entrypoints.utils import with_cancellation from vllm.logger import init_logger from vllm.sampling_params import SamplingParams from vllm.usage.usage_lib import UsageContext -from vllm.utils import (FlexibleArgumentParser, iterate_with_cancellation, - random_uuid) +from vllm.utils import FlexibleArgumentParser, random_uuid from vllm.version import __version__ as VLLM_VERSION logger = init_logger("vllm.entrypoints.api_server") @@ -47,6 +47,11 @@ async def generate(request: Request) -> Response: - other fields: the sampling parameters (See `SamplingParams` for details). 
""" request_dict = await request.json() + return await _generate(request_dict, raw_request=request) + + +@with_cancellation +async def _generate(request_dict: dict, raw_request: Request) -> Response: prompt = request_dict.pop("prompt") stream = request_dict.pop("stream", False) sampling_params = SamplingParams(**request_dict) @@ -54,8 +59,6 @@ async def generate(request: Request) -> Response: assert engine is not None results_generator = engine.generate(prompt, sampling_params, request_id) - results_generator = iterate_with_cancellation( - results_generator, is_cancelled=request.is_disconnected) # Streaming case async def stream_results() -> AsyncGenerator[bytes, None]: diff --git a/vllm/entrypoints/chat_utils.py b/vllm/entrypoints/chat_utils.py index c2054dcbfce0e..3df08c740d65b 100644 --- a/vllm/entrypoints/chat_utils.py +++ b/vllm/entrypoints/chat_utils.py @@ -13,7 +13,8 @@ # yapf conflicts with isort for this block # yapf: disable from openai.types.chat import (ChatCompletionAssistantMessageParam, - ChatCompletionContentPartImageParam) + ChatCompletionContentPartImageParam, + ChatCompletionContentPartInputAudioParam) from openai.types.chat import ( ChatCompletionContentPartParam as OpenAIChatCompletionContentPartParam) from openai.types.chat import (ChatCompletionContentPartRefusalParam, @@ -105,6 +106,7 @@ class CustomChatCompletionContentSimpleVideoParam(TypedDict, total=False): ChatCompletionContentPartParam: TypeAlias = Union[ OpenAIChatCompletionContentPartParam, ChatCompletionContentPartAudioParam, + ChatCompletionContentPartInputAudioParam, ChatCompletionContentPartVideoParam, ChatCompletionContentPartRefusalParam, CustomChatCompletionContentSimpleImageParam, CustomChatCompletionContentSimpleAudioParam, @@ -418,7 +420,7 @@ def _placeholder_str(self, modality: ModalityStr, raise TypeError(f"Unknown {modality} model type: {model_type}") elif modality == "audio": if model_type == "ultravox": - return "<|reserved_special_token_0|>" + return "<|audio|>" if model_type == "qwen2_audio": return (f"Audio {current_count}: " f"<|audio_bos|><|AUDIO|><|audio_eos|>") @@ -519,6 +521,10 @@ def parse_image(self, image_url: str) -> None: def parse_audio(self, audio_url: str) -> None: raise NotImplementedError + @abstractmethod + def parse_input_audio(self, input_audio: Dict[str, str]) -> None: + raise NotImplementedError + @abstractmethod def parse_video(self, video_url: str) -> None: raise NotImplementedError @@ -545,6 +551,15 @@ def parse_audio(self, audio_url: str) -> None: placeholder = self._tracker.add("audio", audio) self._add_placeholder(placeholder) + def parse_input_audio(self, input_audio: Dict[str, str]) -> None: + input_audio_data = input_audio.get("data","") + input_audio_format = input_audio.get("format","") + audio_url = f"data:audio/{input_audio_format};base64,{input_audio_data}" + audio = get_and_parse_audio(audio_url) + + placeholder = self._tracker.add("audio", audio) + self._add_placeholder(placeholder) + def parse_video(self, video_url: str) -> None: video = get_and_parse_video(video_url) @@ -574,6 +589,15 @@ def parse_audio(self, audio_url: str) -> None: placeholder = self._tracker.add("audio", audio_coro) self._add_placeholder(placeholder) + def parse_input_audio(self, input_audio: Dict[str, str]) -> None: + input_audio_data = input_audio.get("data","") + input_audio_format = input_audio.get("format","") + audio_url = f"data:audio/{input_audio_format};base64,{input_audio_data}" + audio_coro = async_get_and_parse_audio(audio_url) + + placeholder = self._tracker.add("audio", 
audio_coro) + self._add_placeholder(placeholder) + def parse_video(self, video_url: str) -> None: video = async_get_and_parse_video(video_url) @@ -667,17 +691,22 @@ def _get_full_multimodal_text_prompt(placeholder_counts: Dict[str, int], _TextParser = partial(cast, ChatCompletionContentPartTextParam) _ImageParser = partial(cast, ChatCompletionContentPartImageParam) _AudioParser = partial(cast, ChatCompletionContentPartAudioParam) +_InputAudioParser = partial(cast, ChatCompletionContentPartInputAudioParam) _RefusalParser = partial(cast, ChatCompletionContentPartRefusalParam) _VideoParser = partial(cast, ChatCompletionContentPartVideoParam) # Define a mapping from part types to their corresponding parsing functions. -MM_PARSER_MAP: Dict[str, Callable[[ChatCompletionContentPartParam], str]] = { +MM_PARSER_MAP: Dict[str, + Callable[[ChatCompletionContentPartParam], + Union[str, Dict[str,str]]]] = { "text": lambda part: _TextParser(part).get("text", ""), "image_url": lambda part: _ImageParser(part).get("image_url", {}).get("url", ""), "audio_url": lambda part: _AudioParser(part).get("audio_url", {}).get("url", ""), + "input_audio": + lambda part: _InputAudioParser(part).get("input_audio", {}), "refusal": lambda part: _RefusalParser(part).get("refusal", ""), "video_url": @@ -686,7 +715,8 @@ def _get_full_multimodal_text_prompt(placeholder_counts: Dict[str, int], def _parse_chat_message_content_mm_part( - part: ChatCompletionContentPartParam) -> Tuple[str, str]: + part: ChatCompletionContentPartParam) -> Tuple[str, + Union[str, Dict[str, str]]]: """ Parses a given multi-modal content part based on its type. @@ -717,6 +747,7 @@ def _parse_chat_message_content_mm_part( return part_type, content # Handle missing 'type' but provided direct URL fields. + # 'type' is required field by pydantic if part_type is None: if part.get("image_url") is not None: image_params = cast(CustomChatCompletionContentSimpleImageParam, @@ -726,6 +757,9 @@ def _parse_chat_message_content_mm_part( audio_params = cast(CustomChatCompletionContentSimpleAudioParam, part) return "audio_url", audio_params.get("audio_url", "") + if part.get("input_audio") is not None: + input_audio_params = cast(Dict[str, str], part) + return "input_audio", input_audio_params if part.get("video_url") is not None: video_params = cast(CustomChatCompletionContentSimpleVideoParam, part) @@ -739,7 +773,7 @@ def _parse_chat_message_content_mm_part( VALID_MESSAGE_CONTENT_MM_PART_TYPES = ("text", "refusal", "image_url", - "audio_url", "video_url") + "audio_url", "input_audio", "video_url") def _parse_chat_message_content_parts( @@ -795,7 +829,7 @@ def _parse_chat_message_content_part( # Handle structured dictionary parts part_type, content = _parse_chat_message_content_mm_part(part) - # if part_type is text/refusal/image_url/audio_url/video_url but + # if part_type is text/refusal/image_url/audio_url/video_url/input_audio but # content is empty, log a warning and skip if part_type in VALID_MESSAGE_CONTENT_MM_PART_TYPES and not content: logger.warning( @@ -804,18 +838,30 @@ def _parse_chat_message_content_part( return None if part_type in ("text", "refusal"): - return {'type': 'text', 'text': content} if wrap_dicts else content + str_content = cast(str, content) + if wrap_dicts: + return {'type': 'text', 'text': str_content} + else: + return str_content if part_type == "image_url": - mm_parser.parse_image(content) + str_content = cast(str, content) + mm_parser.parse_image(str_content) return {'type': 'image'} if wrap_dicts else None if part_type == 
"audio_url": - mm_parser.parse_audio(content) + str_content = cast(str, content) + mm_parser.parse_audio(str_content) + return {'type': 'audio'} if wrap_dicts else None + + if part_type == "input_audio": + dict_content = cast(Dict[str, str], content) + mm_parser.parse_input_audio(dict_content) return {'type': 'audio'} if wrap_dicts else None if part_type == "video_url": - mm_parser.parse_video(content) + str_content = cast(str, content) + mm_parser.parse_video(str_content) return {'type': 'video'} if wrap_dicts else None raise NotImplementedError(f"Unknown part type: {part_type}") @@ -840,7 +886,6 @@ def _parse_chat_message_content( content = [ ChatCompletionContentPartTextParam(type="text", text=content) ] - result = _parse_chat_message_content_parts( role, content, # type: ignore diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index 1551a9a998160..58ab892676b9a 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -1,11 +1,11 @@ import itertools -import json import warnings from contextlib import contextmanager from typing import (Any, ClassVar, Dict, List, Optional, Sequence, Tuple, Type, Union, cast, overload) from tqdm import tqdm +from typing_extensions import deprecated from vllm import envs from vllm.beam_search import (BeamSearchInstance, BeamSearchOutput, @@ -26,7 +26,9 @@ from vllm.lora.request import LoRARequest from vllm.model_executor.guided_decoding.guided_fields import ( GuidedDecodingRequest, LLMGuidedOptions) -from vllm.outputs import EmbeddingRequestOutput, RequestOutput +from vllm.outputs import (ClassificationRequestOutput, EmbeddingRequestOutput, + PoolingRequestOutput, RequestOutput, + ScoringRequestOutput) from vllm.pooling_params import PoolingParams from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sampling_params import (BeamSearchParams, GuidedDecodingParams, @@ -120,7 +122,7 @@ class LLM: serving, use the :class:`~vllm.AsyncLLMEngine` class instead. """ - DEPRECATE_LEGACY: ClassVar[bool] = False + DEPRECATE_LEGACY: ClassVar[bool] = True """A flag to toggle whether to deprecate the legacy generate/encode API.""" DEPRECATE_INIT_POSARGS: ClassVar[bool] = True @@ -185,12 +187,9 @@ def __init__( kwargs["disable_log_stats"] = True if compilation_config is not None: - if isinstance(compilation_config, (int)): + if isinstance(compilation_config, (int, dict)): compilation_config_instance = CompilationConfig.from_cli( str(compilation_config)) - elif isinstance(compilation_config, (dict)): - compilation_config_instance = CompilationConfig.from_cli( - json.dumps(compilation_config)) else: compilation_config_instance = compilation_config else: @@ -233,6 +232,10 @@ def __init__( self.request_counter = Counter() + def __del__(self): + if self.llm_engine and hasattr(self.llm_engine, "shutdown"): + self.llm_engine.shutdown() + @staticmethod def get_engine_class() -> Type[LLMEngine]: if envs.VLLM_USE_V1: @@ -255,7 +258,24 @@ def set_tokenizer(self, tokenizer: AnyTokenizer) -> None: else: tokenizer_group.tokenizer = get_cached_tokenizer(tokenizer) + @overload + def generate( + self, + prompts: Union[PromptType, Sequence[PromptType]], + /, + sampling_params: Optional[Union[SamplingParams, + Sequence[SamplingParams]]] = None, + *, + use_tqdm: bool = True, + lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, + prompt_adapter_request: Optional[PromptAdapterRequest] = None, + guided_options_request: Optional[Union[LLMGuidedOptions, + GuidedDecodingRequest]] = None, + ) -> List[RequestOutput]: + ... 
+ @overload # LEGACY: single (prompt + optional token ids) + @deprecated("'prompt_token_ids' will become part of 'prompts'") def generate( self, prompts: str, @@ -264,10 +284,14 @@ def generate( prompt_token_ids: Optional[List[int]] = None, use_tqdm: bool = True, lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, + prompt_adapter_request: Optional[PromptAdapterRequest] = None, + guided_options_request: Optional[Union[LLMGuidedOptions, + GuidedDecodingRequest]] = None, ) -> List[RequestOutput]: ... @overload # LEGACY: multi (prompt + optional token ids) + @deprecated("'prompt_token_ids' will become part of 'prompts'") def generate( self, prompts: List[str], @@ -276,10 +300,14 @@ def generate( prompt_token_ids: Optional[List[List[int]]] = None, use_tqdm: bool = True, lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, + prompt_adapter_request: Optional[PromptAdapterRequest] = None, + guided_options_request: Optional[Union[LLMGuidedOptions, + GuidedDecodingRequest]] = None, ) -> List[RequestOutput]: ... @overload # LEGACY: single (token ids + optional prompt) + @deprecated("'prompt_token_ids' will become part of 'prompts'") def generate( self, prompts: Optional[str] = None, @@ -289,10 +317,14 @@ def generate( prompt_token_ids: List[int], use_tqdm: bool = True, lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, + prompt_adapter_request: Optional[PromptAdapterRequest] = None, + guided_options_request: Optional[Union[LLMGuidedOptions, + GuidedDecodingRequest]] = None, ) -> List[RequestOutput]: ... @overload # LEGACY: multi (token ids + optional prompt) + @deprecated("'prompt_token_ids' will become part of 'prompts'") def generate( self, prompts: Optional[List[str]] = None, @@ -302,10 +334,14 @@ def generate( prompt_token_ids: List[List[int]], use_tqdm: bool = True, lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, + prompt_adapter_request: Optional[PromptAdapterRequest] = None, + guided_options_request: Optional[Union[LLMGuidedOptions, + GuidedDecodingRequest]] = None, ) -> List[RequestOutput]: ... @overload # LEGACY: single or multi token ids [pos-only] + @deprecated("'prompt_token_ids' will become part of 'prompts'") def generate( self, prompts: None, @@ -313,19 +349,9 @@ def generate( prompt_token_ids: Union[List[int], List[List[int]]], use_tqdm: bool = True, lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, - ) -> List[RequestOutput]: - ... - - @overload - def generate( - self, - prompts: Union[PromptType, Sequence[PromptType]], - /, - *, - sampling_params: Optional[Union[SamplingParams, - Sequence[SamplingParams]]] = None, - use_tqdm: bool = True, - lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, + prompt_adapter_request: Optional[PromptAdapterRequest] = None, + guided_options_request: Optional[Union[LLMGuidedOptions, + GuidedDecodingRequest]] = None, ) -> List[RequestOutput]: ... @@ -379,19 +405,20 @@ def generate( considered legacy and may be deprecated in the future. You should instead pass them via the ``inputs`` parameter. 
""" - task = self.llm_engine.model_config.task - if task != "generate": + runner_type = self.llm_engine.model_config.runner_type + if runner_type != "generate": messages = [ "LLM.generate() is only supported for (conditional) generation " "models (XForCausalLM, XForConditionalGeneration).", ] - supported_tasks = self.llm_engine.model_config.supported_tasks - if "generate" in supported_tasks: + supported_runner_types = self.llm_engine.model_config \ + .supported_runner_types + if "generate" in supported_runner_types: messages.append( - "Your model supports the 'generate' task, but is " - f"currently initialized for the '{task}' task. Please " - "initialize the model using `--task generate`.") + "Your model supports the 'generate' runner, but is " + f"currently initialized for the '{runner_type}' runner. " + "Please initialize vLLM using `--task generate`.") raise ValueError(" ".join(messages)) @@ -670,7 +697,22 @@ def chat( lora_request=lora_request, ) + @overload + def encode( + self, + prompts: Union[PromptType, Sequence[PromptType]], + /, + pooling_params: Optional[Union[PoolingParams, + Sequence[PoolingParams]]] = None, + *, + use_tqdm: bool = True, + lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, + prompt_adapter_request: Optional[PromptAdapterRequest] = None, + ) -> List[PoolingRequestOutput]: + ... + @overload # LEGACY: single (prompt + optional token ids) + @deprecated("'prompt_token_ids' will become part of 'prompts'") def encode( self, prompts: str, @@ -679,10 +721,12 @@ def encode( prompt_token_ids: Optional[List[int]] = None, use_tqdm: bool = True, lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, - ) -> List[EmbeddingRequestOutput]: + prompt_adapter_request: Optional[PromptAdapterRequest] = None, + ) -> List[PoolingRequestOutput]: ... @overload # LEGACY: multi (prompt + optional token ids) + @deprecated("'prompt_token_ids' will become part of 'prompts'") def encode( self, prompts: List[str], @@ -691,10 +735,12 @@ def encode( prompt_token_ids: Optional[List[List[int]]] = None, use_tqdm: bool = True, lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, - ) -> List[EmbeddingRequestOutput]: + prompt_adapter_request: Optional[PromptAdapterRequest] = None, + ) -> List[PoolingRequestOutput]: ... @overload # LEGACY: single (token ids + optional prompt) + @deprecated("'prompt_token_ids' will become part of 'prompts'") def encode( self, prompts: Optional[str] = None, @@ -704,10 +750,12 @@ def encode( prompt_token_ids: List[int], use_tqdm: bool = True, lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, - ) -> List[EmbeddingRequestOutput]: + prompt_adapter_request: Optional[PromptAdapterRequest] = None, + ) -> List[PoolingRequestOutput]: ... @overload # LEGACY: multi (token ids + optional prompt) + @deprecated("'prompt_token_ids' will become part of 'prompts'") def encode( self, prompts: Optional[List[str]] = None, @@ -717,10 +765,12 @@ def encode( prompt_token_ids: List[List[int]], use_tqdm: bool = True, lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, - ) -> List[EmbeddingRequestOutput]: + prompt_adapter_request: Optional[PromptAdapterRequest] = None, + ) -> List[PoolingRequestOutput]: ... 
@overload # LEGACY: single or multi token ids [pos-only] + @deprecated("'prompt_token_ids' will become part of 'prompts'") def encode( self, prompts: None, @@ -728,20 +778,8 @@ def encode( prompt_token_ids: Union[List[int], List[List[int]]], use_tqdm: bool = True, lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, - ) -> List[EmbeddingRequestOutput]: - ... - - @overload - def encode( - self, - prompts: Union[PromptType, Sequence[PromptType]], - /, - *, - pooling_params: Optional[Union[PoolingParams, - Sequence[PoolingParams]]] = None, - use_tqdm: bool = True, - lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, - ) -> List[EmbeddingRequestOutput]: + prompt_adapter_request: Optional[PromptAdapterRequest] = None, + ) -> List[PoolingRequestOutput]: ... @deprecate_kwargs( @@ -759,8 +797,9 @@ def encode( use_tqdm: bool = True, lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, prompt_adapter_request: Optional[PromptAdapterRequest] = None, - ) -> List[EmbeddingRequestOutput]: - """Generates the completions for the input prompts. + ) -> List[PoolingRequestOutput]: + """Apply pooling to the hidden states corresponding to the input + prompts. This class automatically batches the given prompts, considering the memory constraint. For the best performance, put all of your prompts @@ -778,24 +817,26 @@ def encode( generation, if any. Returns: - A list of ``EmbeddingRequestOutput`` objects containing the - generated embeddings in the same order as the input prompts. + A list of ``PoolingRequestOutput`` objects containing the + pooled hidden states in the same order as the input prompts. Note: Using ``prompts`` and ``prompt_token_ids`` as keyword parameters is considered legacy and may be deprecated in the future. You should instead pass them via the ``inputs`` parameter. """ - task = self.llm_engine.model_config.task - if task != "embedding": - messages = ["LLM.encode() is only supported for embedding models."] + runner_type = self.llm_engine.model_config.runner_type + if runner_type != "pooling": + messages = ["LLM.encode() is only supported for pooling models."] - supported_tasks = self.llm_engine.model_config.supported_tasks - if "embedding" in supported_tasks: + supported_runner_types = self.llm_engine.model_config \ + .supported_runner_types + if "pooling" in supported_runner_types: messages.append( - "Your model supports the 'embedding' task, but is " - f"currently initialized for the '{task}' task. Please " - "initialize the model using `--task embedding`.") + "Your model supports the 'pooling' runner, but is " + f"currently initialized for the '{runner_type}' runner. " + "Please initialize vLLM using `--task embed`, " + "`--task classify`, `--task score` etc.") raise ValueError(" ".join(messages)) @@ -821,30 +862,112 @@ def encode( outputs = self._run_engine(use_tqdm=use_tqdm) return self.engine_class.validate_outputs(outputs, - EmbeddingRequestOutput) + PoolingRequestOutput) + + def embed( + self, + prompts: Union[PromptType, Sequence[PromptType]], + /, + *, + use_tqdm: bool = True, + lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, + prompt_adapter_request: Optional[PromptAdapterRequest] = None, + ) -> List[EmbeddingRequestOutput]: + """ + Generate an embedding vector for each prompt. + + This class automatically batches the given prompts, considering + the memory constraint. For the best performance, put all of your prompts + into a single list and pass it to this method. + + Args: + prompts: The prompts to the LLM. 
You may pass a sequence of prompts + for batch inference. See :class:`~vllm.inputs.PromptType` + for more details about the format of each prompt. + use_tqdm: Whether to use tqdm to display the progress bar. + lora_request: LoRA request to use for generation, if any. + prompt_adapter_request: Prompt Adapter request to use for + generation, if any. + + Returns: + A list of ``EmbeddingRequestOutput`` objects containing the + embedding vectors in the same order as the input prompts. + """ + if self.llm_engine.model_config.task != "embed": + raise ValueError( + "Embedding API is only enabled for `--task embed`") + + items = self.encode(prompts, + use_tqdm=use_tqdm, + lora_request=lora_request, + prompt_adapter_request=prompt_adapter_request) + + return [EmbeddingRequestOutput.from_base(item) for item in items] + + def classify( + self, + prompts: Union[PromptType, Sequence[PromptType]], + /, + *, + use_tqdm: bool = True, + lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, + prompt_adapter_request: Optional[PromptAdapterRequest] = None, + ) -> List[ClassificationRequestOutput]: + """ + Generate class logits for each prompt. + + This class automatically batches the given prompts, considering + the memory constraint. For the best performance, put all of your prompts + into a single list and pass it to this method. + + Args: + prompts: The prompts to the LLM. You may pass a sequence of prompts + for batch inference. See :class:`~vllm.inputs.PromptType` + for more details about the format of each prompt. + use_tqdm: Whether to use tqdm to display the progress bar. + lora_request: LoRA request to use for generation, if any. + prompt_adapter_request: Prompt Adapter request to use for + generation, if any. + + Returns: + A list of ``ClassificationRequestOutput`` objects containing the + class logits in the same order as the input prompts. + """ + if self.llm_engine.model_config.task != "classify": + raise ValueError( + "Classification API is only enabled for `--task classify`") + + items = self.encode(prompts, + use_tqdm=use_tqdm, + lora_request=lora_request, + prompt_adapter_request=prompt_adapter_request) + + return [ClassificationRequestOutput.from_base(item) for item in items] def score( self, text_1: Union[SingletonPrompt, Sequence[SingletonPrompt]], text_2: Union[SingletonPrompt, Sequence[SingletonPrompt]], /, + *, truncate_prompt_tokens: Optional[int] = None, use_tqdm: bool = True, lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None, prompt_adapter_request: Optional[PromptAdapterRequest] = None, - ) -> List[EmbeddingRequestOutput]: - """Generates similarity scores for all pairs <text,text_pair>. + ) -> List[ScoringRequestOutput]: + """Generate similarity scores for all pairs ``<text,text_pair>``. - The inputs can be 1 -> 1, 1 -> N or N -> N. In the 1 - N case - the text_1 sentence will be replicated N times to pair with the text_2 - sentences. The input pairs are used to build a list of prompts for the + The inputs can be ``1 -> 1``, ``1 -> N`` or ``N -> N``. + In the ``1 -> N`` case the ``text_1`` sentence will be replicated ``N`` + times to pair with the ``text_2`` sentences. + The input pairs are used to build a list of prompts for the cross encoder model. This class automatically batches the prompts, considering the memory constraint. For the best performance, put all of your texts into a single list and pass it to this method. 
Args: text_1: can be a single prompt or a list of prompts, in which - case it has to have the same length as the text_2 list + case it has to have the same length as the ``text_2`` list text_2: The texts to pair with the query to form the input to the LLM. See :class:`~vllm.inputs.PromptType` for more details about the format of each prompts. @@ -854,24 +977,28 @@ def score( generation, if any. Returns: - A list of ``EmbeddingRequestOutput`` objects containing the + A list of ``ScoringRequestOutput`` objects containing the generated scores in the same order as the input prompts. """ - task = self.llm_engine.model_config.task - if task != "embedding": - messages = ["LLM.score() is only supported for embedding models."] + runner_type = self.llm_engine.model_config.runner_type + if runner_type != "pooling": + messages = ["LLM.score() is only supported for pooling models."] - supported_tasks = self.llm_engine.model_config.supported_tasks - if "embedding" in supported_tasks: + supported_runner_types = self.llm_engine.model_config \ + .supported_runner_types + if "pooling" in supported_runner_types: messages.append( - "Your model supports the 'embedding' task, but is " - f"currently initialized for the '{task}' task. Please " - "initialize the model using `--task embedding`.") + "Your model supports the 'pooling' runner, but is " + f"currently initialized for the '{runner_type}' runner. " + "Please initialize vLLM using `--task embed`, " + "`--task classify`, `--task score` etc.") raise ValueError(" ".join(messages)) if not self.llm_engine.model_config.is_cross_encoder: - raise ValueError("Your model does not support the cross encoding") + raise ValueError("Your model does not support cross encoding") + if self.llm_engine.model_config.task != "score": + raise ValueError("Score API is only enabled for `--task score`") tokenizer = self.llm_engine.get_tokenizer() @@ -942,8 +1069,10 @@ def ensure_str(prompt: SingletonPrompt): ) outputs = self._run_engine(use_tqdm=use_tqdm) - return self.engine_class.validate_outputs(outputs, - EmbeddingRequestOutput) + items = self.engine_class.validate_outputs(outputs, + PoolingRequestOutput) + + return [ScoringRequestOutput.from_base(item) for item in items] def start_profile(self) -> None: self.llm_engine.start_profile() @@ -1085,7 +1214,7 @@ def _add_guided_params( def _run_engine( self, *, use_tqdm: bool - ) -> List[Union[RequestOutput, EmbeddingRequestOutput]]: + ) -> List[Union[RequestOutput, PoolingRequestOutput]]: # Initialize tqdm. if use_tqdm: num_requests = self.llm_engine.get_num_unfinished_requests() @@ -1098,7 +1227,7 @@ def _run_engine( ) # Run the engine. 
- outputs: List[Union[RequestOutput, EmbeddingRequestOutput]] = [] + outputs: List[Union[RequestOutput, PoolingRequestOutput]] = [] total_in_toks = 0 total_out_toks = 0 while self.llm_engine.has_unfinished_requests(): diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py index 6bc31ef83ded4..00e2d1a56f160 100644 --- a/vllm/entrypoints/openai/api_server.py +++ b/vllm/entrypoints/openai/api_server.py @@ -1,4 +1,5 @@ import asyncio +import atexit import importlib import inspect import multiprocessing @@ -58,6 +59,7 @@ from vllm.entrypoints.openai.serving_tokenization import ( OpenAIServingTokenization) from vllm.entrypoints.openai.tool_parsers import ToolParserManager +from vllm.entrypoints.utils import with_cancellation from vllm.logger import init_logger from vllm.usage.usage_lib import UsageContext from vllm.utils import (FlexibleArgumentParser, get_open_zmq_ipc_path, @@ -175,8 +177,8 @@ async def build_async_engine_client_from_engine_args( # Select random path for IPC. ipc_path = get_open_zmq_ipc_path() - logger.info("Multiprocessing frontend to use %s for IPC Path.", - ipc_path) + logger.debug("Multiprocessing frontend to use %s for IPC Path.", + ipc_path) # Start RPCServer in separate process (holds the LLMEngine). # the current process might have CUDA context, @@ -196,6 +198,14 @@ async def build_async_engine_client_from_engine_args( assert engine_pid is not None, "Engine process failed to start." logger.info("Started engine process with PID %d", engine_pid) + def _cleanup_ipc_path(): + socket_path = ipc_path.replace("ipc://", "") + if os.path.exists(socket_path): + os.remove(socket_path) + + # Ensure we clean up the local IPC socket file on exit. + atexit.register(_cleanup_ipc_path) + # Build RPCClient, which conforms to EngineClient Protocol. 
engine_config = engine_args.create_engine_config() build_client = partial(MQLLMEngineClient, ipc_path, engine_config, @@ -249,8 +259,8 @@ def mount_metrics(app: FastAPI): prometheus_multiproc_dir_path = os.getenv("PROMETHEUS_MULTIPROC_DIR", None) if prometheus_multiproc_dir_path is not None: - logger.info("vLLM to use %s as PROMETHEUS_MULTIPROC_DIR", - prometheus_multiproc_dir_path) + logger.debug("vLLM to use %s as PROMETHEUS_MULTIPROC_DIR", + prometheus_multiproc_dir_path) registry = CollectorRegistry() multiprocess.MultiProcessCollector(registry) @@ -302,10 +312,11 @@ async def health(raw_request: Request) -> Response: @router.post("/tokenize") +@with_cancellation async def tokenize(request: TokenizeRequest, raw_request: Request): handler = tokenization(raw_request) - generator = await handler.create_tokenize(request) + generator = await handler.create_tokenize(request, raw_request) if isinstance(generator, ErrorResponse): return JSONResponse(content=generator.model_dump(), status_code=generator.code) @@ -316,10 +327,11 @@ async def tokenize(request: TokenizeRequest, raw_request: Request): @router.post("/detokenize") +@with_cancellation async def detokenize(request: DetokenizeRequest, raw_request: Request): handler = tokenization(raw_request) - generator = await handler.create_detokenize(request) + generator = await handler.create_detokenize(request, raw_request) if isinstance(generator, ErrorResponse): return JSONResponse(content=generator.model_dump(), status_code=generator.code) @@ -344,6 +356,7 @@ async def show_version(): @router.post("/v1/chat/completions") +@with_cancellation async def create_chat_completion(request: ChatCompletionRequest, raw_request: Request): handler = chat(raw_request) @@ -364,6 +377,7 @@ async def create_chat_completion(request: ChatCompletionRequest, @router.post("/v1/completions") +@with_cancellation async def create_completion(request: CompletionRequest, raw_request: Request): handler = completion(raw_request) if handler is None: @@ -381,6 +395,7 @@ async def create_completion(request: CompletionRequest, raw_request: Request): @router.post("/v1/embeddings") +@with_cancellation async def create_embedding(request: EmbeddingRequest, raw_request: Request): handler = embedding(raw_request) if handler is None: @@ -397,7 +412,8 @@ async def create_embedding(request: EmbeddingRequest, raw_request: Request): assert_never(generator) -@router.post("/v1/score") +@router.post("/score") +@with_cancellation async def create_score(request: ScoreRequest, raw_request: Request): handler = score(raw_request) if handler is None: @@ -414,6 +430,16 @@ async def create_score(request: ScoreRequest, raw_request: Request): assert_never(generator) +@router.post("/v1/score") +@with_cancellation +async def create_score_v1(request: ScoreRequest, raw_request: Request): + logger.warning( + "To indicate that Score API is not part of standard OpenAI API, we " + "have moved it to `/score`. Please update your client accordingly.") + + return await create_score(request, raw_request) + + if envs.VLLM_TORCH_PROFILER_DIR: logger.warning( "Torch Profiler is enabled in the API server. 
This should ONLY be " @@ -573,7 +599,7 @@ def init_app_state( enable_auto_tools=args.enable_auto_tool_choice, tool_parser=args.tool_call_parser, enable_prompt_tokens_details=args.enable_prompt_tokens_details, - ) if model_config.task == "generate" else None + ) if model_config.runner_type == "generate" else None state.openai_serving_completion = OpenAIServingCompletion( engine_client, model_config, @@ -582,7 +608,7 @@ def init_app_state( prompt_adapters=args.prompt_adapters, request_logger=request_logger, return_tokens_as_token_ids=args.return_tokens_as_token_ids, - ) if model_config.task == "generate" else None + ) if model_config.runner_type == "generate" else None state.openai_serving_embedding = OpenAIServingEmbedding( engine_client, model_config, @@ -590,13 +616,13 @@ def init_app_state( request_logger=request_logger, chat_template=resolved_chat_template, chat_template_content_format=args.chat_template_content_format, - ) if model_config.task == "embedding" else None + ) if model_config.runner_type == "pooling" else None state.openai_serving_scores = OpenAIServingScores( engine_client, model_config, base_model_paths, request_logger=request_logger - ) if (model_config.task == "embedding" \ + ) if (model_config.runner_type == "pooling" \ and model_config.is_cross_encoder) else None state.openai_serving_tokenization = OpenAIServingTokenization( engine_client, diff --git a/vllm/entrypoints/openai/logits_processors.py b/vllm/entrypoints/openai/logits_processors.py index 7913f8720ca73..c8132811de903 100644 --- a/vllm/entrypoints/openai/logits_processors.py +++ b/vllm/entrypoints/openai/logits_processors.py @@ -71,7 +71,7 @@ def get_logits_processors( # Check if token_id is within the vocab size for token_id, bias in clamped_logit_bias.items(): - if token_id < 0 or token_id >= tokenizer.vocab_size: + if token_id < 0 or token_id >= len(tokenizer): raise ValueError(f"token_id {token_id} in logit_bias contains " "out-of-vocab token id") @@ -81,6 +81,6 @@ def get_logits_processors( if allowed_token_ids is not None: logits_processors.append( _get_allowed_token_ids_logits_processor( - frozenset(allowed_token_ids), tokenizer.vocab_size)) + frozenset(allowed_token_ids), len(tokenizer))) return logits_processors diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py index ee94a9413f098..5a70e0952666b 100644 --- a/vllm/entrypoints/openai/protocol.py +++ b/vllm/entrypoints/openai/protocol.py @@ -1,5 +1,6 @@ # Adapted from # https://github.com/lm-sys/FastChat/blob/168ccc29d3f7edc50823016105c024fe2282732a/fastchat/protocol/openai_api_protocol.py +import re import time from argparse import Namespace from typing import Any, Dict, List, Literal, Optional, Union @@ -14,7 +15,7 @@ from vllm.sampling_params import (BeamSearchParams, GuidedDecodingParams, RequestOutputKind, SamplingParams) from vllm.sequence import Logprob -from vllm.utils import random_uuid +from vllm.utils import random_uuid, resolve_obj_by_qualname logger = init_logger(__name__) @@ -148,6 +149,46 @@ class ChatCompletionNamedToolChoiceParam(OpenAIBaseModel): type: Literal["function"] = "function" +class LogitsProcessorConstructor(BaseModel): + qualname: str + args: Optional[List[Any]] = None + kwargs: Optional[Dict[str, Any]] = None + + +LogitsProcessors = List[Union[str, LogitsProcessorConstructor]] + + +def get_logits_processors(processors: Optional[LogitsProcessors], + pattern: Optional[str]) -> Optional[List[Any]]: + if processors and pattern: + logits_processors = [] + for processor in processors: + 
qualname = processor if isinstance(processor, + str) else processor.qualname + if not re.match(pattern, qualname): + raise ValueError( + f"Logits processor '{qualname}' is not allowed by this " + "server. See --logits-processor-pattern engine argument " + "for more information.") + try: + logits_processor = resolve_obj_by_qualname(qualname) + except Exception as e: + raise ValueError( + f"Logits processor '{qualname}' could not be resolved: {e}" + ) from e + if isinstance(processor, LogitsProcessorConstructor): + logits_processor = logits_processor(*processor.args or [], + **processor.kwargs or {}) + logits_processors.append(logits_processor) + return logits_processors + elif processors: + raise ValueError( + "The `logits_processors` argument is not supported by this " + "server. See --logits-processor-pattern engine argument " + "for more information.") + return None + + class ChatCompletionRequest(OpenAIBaseModel): # Ordered by official OpenAI API documentation # https://platform.openai.com/docs/api-reference/chat/create @@ -170,7 +211,7 @@ class ChatCompletionRequest(OpenAIBaseModel): stop: Optional[Union[str, List[str]]] = Field(default_factory=list) stream: Optional[bool] = False stream_options: Optional[StreamOptions] = None - temperature: Optional[float] = 0.7 + temperature: Optional[float] = 1.0 top_p: Optional[float] = 1.0 tools: Optional[List[ChatCompletionToolsParam]] = None tool_choice: Optional[Union[Literal["none"], Literal["auto"], @@ -293,6 +334,17 @@ class ChatCompletionRequest(OpenAIBaseModel): "The request_id related to this request. If the caller does " "not set it, a random_uuid will be generated. This id is used " "through out the inference process and return in response.")) + logits_processors: Optional[LogitsProcessors] = Field( + default=None, + description=( + "A list of either qualified names of logits processors, or " + "constructor objects, to apply when sampling. A constructor is " + "a JSON object with a required 'qualname' field specifying the " + "qualified name of the processor class/factory, and optional " + "'args' and 'kwargs' fields containing positional and keyword " + "arguments. 
For example: {'qualname': " + "'my_module.MyLogitsProcessor', 'args': [1, 2], 'kwargs': " + "{'param': 'value'}}.")) # doc: end-chat-completion-extra-params @@ -314,7 +366,9 @@ def to_beam_search_params(self, length_penalty=self.length_penalty, include_stop_str_in_output=self.include_stop_str_in_output) - def to_sampling_params(self, default_max_tokens: int) -> SamplingParams: + def to_sampling_params( + self, default_max_tokens: int, + logits_processor_pattern: Optional[str]) -> SamplingParams: # TODO(#9845): remove max_tokens when field is removed from OpenAI API max_tokens = self.max_completion_tokens or self.max_tokens if max_tokens is None: @@ -333,7 +387,7 @@ def to_sampling_params(self, default_max_tokens: int) -> SamplingParams: assert json_schema is not None self.guided_json = json_schema.json_schema if self.guided_decoding_backend is None: - self.guided_decoding_backend = "lm-format-enforcer" + self.guided_decoding_backend = "xgrammar" guided_decoding = GuidedDecodingParams.from_optional( json=self._get_guided_json_from_tool() or self.guided_json, @@ -364,6 +418,8 @@ def to_sampling_params(self, default_max_tokens: int) -> SamplingParams: min_tokens=self.min_tokens, skip_special_tokens=self.skip_special_tokens, spaces_between_special_tokens=self.spaces_between_special_tokens, + logits_processors=get_logits_processors(self.logits_processors, + logits_processor_pattern), include_stop_str_in_output=self.include_stop_str_in_output, truncate_prompt_tokens=self.truncate_prompt_tokens, output_kind=RequestOutputKind.DELTA if self.stream \ @@ -599,6 +655,17 @@ class CompletionRequest(OpenAIBaseModel): "The priority of the request (lower means earlier handling; " "default: 0). Any priority other than 0 will raise an error " "if the served model does not use priority scheduling.")) + logits_processors: Optional[LogitsProcessors] = Field( + default=None, + description=( + "A list of either qualified names of logits processors, or " + "constructor objects, to apply when sampling. A constructor is " + "a JSON object with a required 'qualname' field specifying the " + "qualified name of the processor class/factory, and optional " + "'args' and 'kwargs' fields containing positional and keyword " + "arguments. 
For example: {'qualname': " + "'my_module.MyLogitsProcessor', 'args': [1, 2], 'kwargs': " + "{'param': 'value'}}.")) # doc: end-completion-extra-params @@ -619,7 +686,9 @@ def to_beam_search_params(self, length_penalty=self.length_penalty, include_stop_str_in_output=self.include_stop_str_in_output) - def to_sampling_params(self, default_max_tokens: int) -> SamplingParams: + def to_sampling_params( + self, default_max_tokens: int, + logits_processor_pattern: Optional[str]) -> SamplingParams: max_tokens = self.max_tokens if max_tokens is None: max_tokens = default_max_tokens @@ -665,6 +734,8 @@ def to_sampling_params(self, default_max_tokens: int) -> SamplingParams: skip_special_tokens=self.skip_special_tokens, spaces_between_special_tokens=self.spaces_between_special_tokens, include_stop_str_in_output=self.include_stop_str_in_output, + logits_processors=get_logits_processors(self.logits_processors, + logits_processor_pattern), truncate_prompt_tokens=self.truncate_prompt_tokens, output_kind=RequestOutputKind.DELTA if self.stream \ else RequestOutputKind.FINAL_ONLY, @@ -812,10 +883,11 @@ class ScoreRequest(OpenAIBaseModel): text_2: Union[List[str], str] truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None - # doc: begin-chat-embedding-pooling-params + # doc: begin-score-pooling-params additional_data: Optional[Any] = None - # doc: end-chat-embedding-pooling-params + # doc: end-score-pooling-params + # doc: begin-score-extra-params priority: int = Field( default=0, description=( @@ -823,6 +895,8 @@ class ScoreRequest(OpenAIBaseModel): "default: 0). Any priority other than 0 will raise an error " "if the served model does not use priority scheduling.")) + # doc: end-score-extra-params + def to_pooling_params(self): return PoolingParams(additional_data=self.additional_data) @@ -900,7 +974,7 @@ class EmbeddingResponse(OpenAIBaseModel): class ScoreResponseData(OpenAIBaseModel): index: int object: str = "score" - score: Union[List[float], str] + score: float class ScoreResponse(OpenAIBaseModel): diff --git a/vllm/entrypoints/openai/run_batch.py b/vllm/entrypoints/openai/run_batch.py index 00cdb3b6839f5..675daf54c0d0d 100644 --- a/vllm/entrypoints/openai/run_batch.py +++ b/vllm/entrypoints/openai/run_batch.py @@ -224,7 +224,7 @@ async def main(args): chat_template=None, chat_template_content_format="auto", enable_prompt_tokens_details=args.enable_prompt_tokens_details, - ) if model_config.task == "generate" else None + ) if model_config.runner_type == "generate" else None openai_serving_embedding = OpenAIServingEmbedding( engine, model_config, @@ -232,7 +232,7 @@ async def main(args): request_logger=request_logger, chat_template=None, chat_template_content_format="auto", - ) if model_config.task == "embedding" else None + ) if model_config.runner_type == "pooling" else None tracker = BatchProgressTracker() logger.info("Reading batch from %s...", args.input_file) diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py index 54ca0463bcab1..81bce0dd370bb 100644 --- a/vllm/entrypoints/openai/serving_chat.py +++ b/vllm/entrypoints/openai/serving_chat.py @@ -32,7 +32,6 @@ from vllm.sequence import Logprob from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer from vllm.transformers_utils.tokenizers import maybe_serialize_tool_calls -from vllm.utils import iterate_with_cancellation logger = init_logger(__name__) @@ -123,6 +122,8 @@ async def create_chat_completion( prompt_adapter_request, ) = self._maybe_get_adapters(request) + 
model_name = self._get_model_name(lora_request) + tokenizer = await self.engine_client.get_tokenizer(lora_request) tool_parser = self.tool_parser @@ -176,7 +177,8 @@ async def create_chat_completion( logger.exception("Error in preprocessing prompt inputs") return self.create_error_response(str(e)) - request_id = f"chatcmpl-{request.request_id}" + request_id = "chatcmpl-" \ + f"{self._base_request_id(raw_request, request.request_id)}" request_metadata = RequestResponseMetadata(request_id=request_id) if raw_request: @@ -194,7 +196,8 @@ async def create_chat_completion( default_max_tokens) else: sampling_params = request.to_sampling_params( - default_max_tokens) + default_max_tokens, + self.model_config.logits_processor_pattern) self._log_inputs(request_id, request_prompts[i], @@ -230,20 +233,16 @@ async def create_chat_completion( assert len(generators) == 1 result_generator, = generators - if raw_request: - result_generator = iterate_with_cancellation( - result_generator, raw_request.is_disconnected) - # Streaming response if request.stream: return self.chat_completion_stream_generator( - request, result_generator, request_id, conversation, tokenizer, - request_metadata) + request, result_generator, request_id, model_name, + conversation, tokenizer, request_metadata) try: return await self.chat_completion_full_generator( - request, result_generator, request_id, conversation, tokenizer, - request_metadata) + request, result_generator, request_id, model_name, + conversation, tokenizer, request_metadata) except ValueError as e: # TODO: Use a vllm-specific Validation Error return self.create_error_response(str(e)) @@ -258,11 +257,11 @@ async def chat_completion_stream_generator( request: ChatCompletionRequest, result_generator: AsyncIterator[RequestOutput], request_id: str, + model_name: str, conversation: List[ConversationMessage], tokenizer: AnyTokenizer, request_metadata: RequestResponseMetadata, ) -> AsyncGenerator[str, None]: - model_name = self.base_model_paths[0].name created_time = int(time.time()) chunk_object_type: Final = "chat.completion.chunk" first_iteration = True @@ -495,21 +494,33 @@ async def chat_completion_stream_generator( if self._should_check_for_unstreamed_tool_arg_tokens( delta_message, output) and tool_parser: + latest_delta_len = 0 + if ((isinstance( + delta_message.tool_calls[0].function, + DeltaFunctionCall)) and isinstance( + delta_message.tool_calls[0].function. + arguments, str)): + latest_delta_len = len( + delta_message.tool_calls[0].function. 
+ arguments) + # get the expected call based on partial JSON # parsing which "autocompletes" the JSON expected_call = json.dumps( tool_parser.prev_tool_call_arr[index].get( - "arguments", {})) + "arguments", {}), + ensure_ascii=False) # get what we've streamed so far for arguments # for the current tool actual_call = tool_parser.streamed_args_for_tool[ index] + if (latest_delta_len > 0): + actual_call = actual_call[:-latest_delta_len] # check to see if there's anything left to stream remaining_call = expected_call.replace( actual_call, "", 1) - # set that as a delta message delta_message = DeltaMessage(tool_calls=[ DeltaToolCall(index=index, @@ -591,12 +602,12 @@ async def chat_completion_full_generator( request: ChatCompletionRequest, result_generator: AsyncIterator[RequestOutput], request_id: str, + model_name: str, conversation: List[ConversationMessage], tokenizer: AnyTokenizer, request_metadata: RequestResponseMetadata, ) -> Union[ErrorResponse, ChatCompletionResponse]: - model_name = self.base_model_paths[0].name created_time = int(time.time()) final_res: Optional[RequestOutput] = None diff --git a/vllm/entrypoints/openai/serving_completion.py b/vllm/entrypoints/openai/serving_completion.py index 936aae8f1c267..5cf9df92e296e 100644 --- a/vllm/entrypoints/openai/serving_completion.py +++ b/vllm/entrypoints/openai/serving_completion.py @@ -30,7 +30,7 @@ from vllm.sampling_params import BeamSearchParams, SamplingParams from vllm.sequence import Logprob from vllm.transformers_utils.tokenizer import AnyTokenizer -from vllm.utils import merge_async_iterators, random_uuid +from vllm.utils import merge_async_iterators logger = init_logger(__name__) @@ -85,8 +85,7 @@ async def create_completion( return self.create_error_response( "suffix is not currently supported") - model_name = self.base_model_paths[0].name - request_id = f"cmpl-{random_uuid()}" + request_id = f"cmpl-{self._base_request_id(raw_request)}" created_time = int(time.time()) request_metadata = RequestResponseMetadata(request_id=request_id) @@ -101,7 +100,7 @@ async def create_completion( tokenizer = await self.engine_client.get_tokenizer(lora_request) - request_prompts, engine_prompts = self._preprocess_completion( + request_prompts, engine_prompts = await self._preprocess_completion( request, tokenizer, request.prompt, @@ -124,7 +123,8 @@ async def create_completion( default_max_tokens) else: sampling_params = request.to_sampling_params( - default_max_tokens) + default_max_tokens, + self.model_config.logits_processor_pattern) request_id_item = f"{request_id}-{i}" @@ -159,9 +159,9 @@ async def create_completion( # TODO: Use a vllm-specific Validation Error return self.create_error_response(str(e)) - result_generator = merge_async_iterators( - *generators, is_cancelled=raw_request.is_disconnected) + result_generator = merge_async_iterators(*generators) + model_name = self._get_model_name(lora_request) num_prompts = len(engine_prompts) # Similar to the OpenAI API, when n != best_of, we do not stream the @@ -392,6 +392,12 @@ def request_output_to_completion_response( prompt_token_ids = final_res.prompt_token_ids assert prompt_token_ids is not None prompt_logprobs = final_res.prompt_logprobs + if prompt_logprobs: + for logprob_dict in prompt_logprobs: + if logprob_dict: + for logprob_values in logprob_dict.values(): + if logprob_values.logprob == float('-inf'): + logprob_values.logprob = -9999.0 prompt_text = final_res.prompt token_ids: GenericSequence[int] diff --git a/vllm/entrypoints/openai/serving_embedding.py 
b/vllm/entrypoints/openai/serving_embedding.py index c84a7d2d8e13e..879276646d2ba 100644 --- a/vllm/entrypoints/openai/serving_embedding.py +++ b/vllm/entrypoints/openai/serving_embedding.py @@ -18,8 +18,9 @@ ErrorResponse, UsageInfo) from vllm.entrypoints.openai.serving_engine import BaseModelPath, OpenAIServing from vllm.logger import init_logger -from vllm.outputs import EmbeddingOutput, EmbeddingRequestOutput -from vllm.utils import merge_async_iterators, random_uuid +from vllm.outputs import (EmbeddingOutput, EmbeddingRequestOutput, + PoolingRequestOutput) +from vllm.utils import merge_async_iterators logger = init_logger(__name__) @@ -40,14 +41,16 @@ def _get_embedding( def request_output_to_embedding_response( - final_res_batch: List[EmbeddingRequestOutput], request_id: str, + final_res_batch: List[PoolingRequestOutput], request_id: str, created_time: int, model_name: str, encoding_format: Literal["float", "base64"]) -> EmbeddingResponse: data: List[EmbeddingResponseData] = [] num_prompt_tokens = 0 for idx, final_res in enumerate(final_res_batch): + embedding_res = EmbeddingRequestOutput.from_base(final_res) prompt_token_ids = final_res.prompt_token_ids - embedding = _get_embedding(final_res.outputs, encoding_format) + + embedding = _get_embedding(embedding_res.outputs, encoding_format) embedding_data = EmbeddingResponseData(index=idx, embedding=embedding) data.append(embedding_data) @@ -110,7 +113,7 @@ async def create_embedding( "dimensions is currently not supported") model_name = request.model - request_id = f"embd-{random_uuid()}" + request_id = f"embd-{self._base_request_id(raw_request)}" created_time = int(time.monotonic()) truncate_prompt_tokens = None @@ -156,19 +159,20 @@ async def create_embedding( add_special_tokens=request.add_special_tokens, ) else: - request_prompts, engine_prompts = self._preprocess_completion( - request, - tokenizer, - request.input, - truncate_prompt_tokens=truncate_prompt_tokens, - add_special_tokens=request.add_special_tokens, - ) + (request_prompts, + engine_prompts) = await self._preprocess_completion( + request, + tokenizer, + request.input, + truncate_prompt_tokens=truncate_prompt_tokens, + add_special_tokens=request.add_special_tokens, + ) except ValueError as e: logger.exception("Error in preprocessing prompt inputs") return self.create_error_response(str(e)) # Schedule the request and get the result generator. 
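+        # (Note: _preprocess_completion is awaited above because, as of this
+        # diff, it runs tokenization in the serving layer's single-thread
+        # executor; see the serving_engine.py changes below.)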
- generators: List[AsyncGenerator[EmbeddingRequestOutput, None]] = [] + generators: List[AsyncGenerator[PoolingRequestOutput, None]] = [] try: pooling_params = request.to_pooling_params() @@ -198,15 +202,12 @@ async def create_embedding( # TODO: Use a vllm-specific Validation Error return self.create_error_response(str(e)) - result_generator = merge_async_iterators( - *generators, - is_cancelled=raw_request.is_disconnected if raw_request else None, - ) + result_generator = merge_async_iterators(*generators) num_prompts = len(engine_prompts) # Non-streaming response - final_res_batch: List[Optional[EmbeddingRequestOutput]] + final_res_batch: List[Optional[PoolingRequestOutput]] final_res_batch = [None] * num_prompts try: async for i, res in result_generator: @@ -214,7 +215,7 @@ async def create_embedding( assert all(final_res is not None for final_res in final_res_batch) - final_res_batch_checked = cast(List[EmbeddingRequestOutput], + final_res_batch_checked = cast(List[PoolingRequestOutput], final_res_batch) response = request_output_to_embedding_response( diff --git a/vllm/entrypoints/openai/serving_engine.py b/vllm/entrypoints/openai/serving_engine.py index cae2877ea7e99..5b6a089e4c319 100644 --- a/vllm/entrypoints/openai/serving_engine.py +++ b/vllm/entrypoints/openai/serving_engine.py @@ -1,10 +1,12 @@ import json import pathlib +from concurrent.futures.thread import ThreadPoolExecutor from dataclasses import dataclass from http import HTTPStatus from typing import (Any, Callable, Dict, Iterable, Iterator, List, Mapping, Optional, Sequence, Tuple, TypedDict, Union) +from fastapi import Request from pydantic import Field from starlette.datastructures import Headers from typing_extensions import Annotated @@ -29,7 +31,7 @@ ErrorResponse, LoadLoraAdapterRequest, ModelCard, ModelList, - ModelPermission, + ModelPermission, ScoreRequest, TokenizeChatRequest, TokenizeCompletionRequest, UnloadLoraAdapterRequest) @@ -46,7 +48,7 @@ from vllm.tracing import (contains_trace_headers, extract_trace_headers, log_tracing_disabled_warning) from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer -from vllm.utils import AtomicCounter, is_list_of +from vllm.utils import AtomicCounter, is_list_of, make_async, random_uuid logger = init_logger(__name__) @@ -71,7 +73,7 @@ class LoRAModulePath: CompletionLikeRequest = Union[CompletionRequest, DetokenizeRequest, - EmbeddingCompletionRequest, + EmbeddingCompletionRequest, ScoreRequest, TokenizeCompletionRequest] ChatLikeRequest = Union[ChatCompletionRequest, EmbeddingChatRequest, @@ -140,6 +142,14 @@ def __init__( self.request_logger = request_logger self.return_tokens_as_token_ids = return_tokens_as_token_ids + self._tokenizer_executor = ThreadPoolExecutor(max_workers=1) + + self._tokenize_prompt_input_async = make_async( + self._tokenize_prompt_input, executor=self._tokenizer_executor) + self._tokenize_prompt_input_or_inputs_async = make_async( + self._tokenize_prompt_input_or_inputs, + executor=self._tokenizer_executor) + async def show_available_models(self) -> ModelList: """Show available models. Right now we only have one model.""" model_cards = [ @@ -368,7 +378,7 @@ def _tokenize_prompt_input_or_inputs( input_or_inputs: Union[str, List[str], List[int], List[List[int]]], truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None, add_special_tokens: bool = True, - ) -> Iterator[TextTokensPrompt]: + ) -> List[TextTokensPrompt]: """ Tokenize/detokenize depending on the input format. 
@@ -376,45 +386,41 @@ def _tokenize_prompt_input_or_inputs( , each input can be a string or array of tokens. Note that each request can pass one or more inputs. """ - for prompt_input in parse_and_batch_prompt(input_or_inputs): - # Although our type checking is based on mypy, - # VSCode Pyright extension should still work properly - # "is True" is required for Pyright to perform type narrowing - # See: https://github.com/microsoft/pyright/issues/7672 - if prompt_input["is_tokens"] is False: - yield self._normalize_prompt_text_to_input( - request, - tokenizer, - prompt=prompt_input["content"], - truncate_prompt_tokens=truncate_prompt_tokens, - add_special_tokens=add_special_tokens, - ) - else: - yield self._normalize_prompt_tokens_to_input( - request, - tokenizer, - prompt_ids=prompt_input["content"], - truncate_prompt_tokens=truncate_prompt_tokens, - ) + # Although our type checking is based on mypy, + # VSCode Pyright extension should still work properly + # "is True" is required for Pyright to perform type narrowing + # See: https://github.com/microsoft/pyright/issues/7672 + return [ + self._normalize_prompt_text_to_input( + request, + tokenizer, + prompt=prompt_input["content"], + truncate_prompt_tokens=truncate_prompt_tokens, + add_special_tokens=add_special_tokens) + if prompt_input["is_tokens"] is False else + self._normalize_prompt_tokens_to_input( + request, + tokenizer, + prompt_ids=prompt_input["content"], + truncate_prompt_tokens=truncate_prompt_tokens) + for prompt_input in parse_and_batch_prompt(input_or_inputs) + ] - def _preprocess_completion( + async def _preprocess_completion( self, request: CompletionLikeRequest, tokenizer: AnyTokenizer, input_or_inputs: Union[str, List[str], List[int], List[List[int]]], truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None, add_special_tokens: bool = True, - ) -> Tuple[Sequence[TextTokensPrompt], List[TokensPrompt]]: - request_prompts = [ - request_prompt - for request_prompt in self._tokenize_prompt_input_or_inputs( - request, - tokenizer, - input_or_inputs, - truncate_prompt_tokens=truncate_prompt_tokens, - add_special_tokens=add_special_tokens, - ) - ] + ) -> Tuple[List[TextTokensPrompt], List[TokensPrompt]]: + request_prompts = await self._tokenize_prompt_input_or_inputs_async( + request, + tokenizer, + input_or_inputs, + truncate_prompt_tokens=truncate_prompt_tokens, + add_special_tokens=add_special_tokens, + ) engine_prompts = [ TokensPrompt(prompt_token_ids=request_prompt["prompt_token_ids"]) @@ -493,7 +499,7 @@ async def _preprocess_chat( request=request) if isinstance(request_prompt, str): - prompt_inputs = self._tokenize_prompt_input( + prompt_inputs = await self._tokenize_prompt_input_async( request, tokenizer, request_prompt, @@ -560,6 +566,16 @@ async def _get_trace_headers( return None + @staticmethod + def _base_request_id(raw_request: Optional[Request], + default: Optional[str] = None) -> Optional[str]: + """Pulls the request id to use from a header, if provided""" + default = default or random_uuid() + if raw_request is None: + return default + + return raw_request.headers.get("X-Request-Id", default) + @staticmethod def _get_decoded_token(logprob: Logprob, token_id: int, @@ -647,3 +663,16 @@ async def unload_lora_adapter( def _is_model_supported(self, model_name): return any(model.name == model_name for model in self.base_model_paths) + + def _get_model_name(self, lora: Optional[LoRARequest]): + """ + Returns the appropriate model name depending on the availability + and support of the LoRA or base 
model. + Parameters: + - lora: LoRARequest that contains a base_model_name. + Returns: + - str: The LoRA adapter name if a LoRA request is provided, + otherwise the name of the first available base model. + """ + if lora is not None: + return lora.lora_name + return self.base_model_paths[0].name diff --git a/vllm/entrypoints/openai/serving_score.py b/vllm/entrypoints/openai/serving_score.py index 156fea6f47982..101d170bee4d6 100644 --- a/vllm/entrypoints/openai/serving_score.py +++ b/vllm/entrypoints/openai/serving_score.py @@ -13,24 +13,24 @@ from vllm.entrypoints.openai.serving_engine import BaseModelPath, OpenAIServing from vllm.inputs.data import TokensPrompt from vllm.logger import init_logger -from vllm.outputs import EmbeddingRequestOutput +from vllm.outputs import PoolingRequestOutput, ScoringRequestOutput from vllm.transformers_utils.tokenizers.mistral import MistralTokenizer -from vllm.utils import merge_async_iterators, random_uuid +from vllm.utils import make_async, merge_async_iterators logger = init_logger(__name__) def request_output_to_score_response( - final_res_batch: List[EmbeddingRequestOutput], request_id: str, + final_res_batch: List[PoolingRequestOutput], request_id: str, created_time: int, model_name: str) -> ScoreResponse: data: List[ScoreResponseData] = [] - score = None num_prompt_tokens = 0 for idx, final_res in enumerate(final_res_batch): - if final_res is not None: - score = final_res.outputs.embedding - score_data = ScoreResponseData(index=idx, score=score) - data.append(score_data) + classify_res = ScoringRequestOutput.from_base(final_res) + + score_data = ScoreResponseData(index=idx, + score=classify_res.outputs.score) + data.append(score_data) usage = UsageInfo( prompt_tokens=num_prompt_tokens, @@ -102,7 +102,7 @@ async def create_score( return error_check_ret model_name = request.model - request_id = f"score-{random_uuid()}" + request_id = f"score-{self._base_request_id(raw_request)}" created_time = int(time.monotonic()) truncate_prompt_tokens = request.truncate_prompt_tokens @@ -119,7 +119,7 @@ async def create_score( if prompt_adapter_request is not None: raise NotImplementedError("Prompt adapter is not supported " - "for embedding models") + "for scoring models") if isinstance(tokenizer, MistralTokenizer): raise ValueError( @@ -133,7 +133,7 @@ async def create_score( return self.create_error_response(str(e)) # Schedule the request and get the result generator. 
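+        # Illustrative example, assuming the make_pairs helper used below
+        # broadcasts a single string against a list:
+        #   make_pairs("query", ["doc a", "doc b"])
+        #   -> [("query", "doc a"), ("query", "doc b")]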
- generators: List[AsyncGenerator[EmbeddingRequestOutput, None]] = [] + generators: List[AsyncGenerator[PoolingRequestOutput, None]] = [] input_pairs = make_pairs(request.text_1, request.text_2) @@ -145,9 +145,11 @@ async def create_score( tokenization_kwargs["truncation"] = True tokenization_kwargs["max_length"] = truncate_prompt_tokens - prompt_inputs = tokenizer(text=q, - text_pair=t, - **tokenization_kwargs) + tokenize_async = make_async(tokenizer.__call__, + executor=self._tokenizer_executor) + prompt_inputs = await tokenize_async(text=q, + text_pair=t, + **tokenization_kwargs) engine_prompt = TokensPrompt( prompt_token_ids=prompt_inputs["input_ids"], token_type_ids=prompt_inputs.get("token_type_ids")) @@ -184,15 +186,12 @@ async def create_score( # TODO: Use a vllm-specific Validation Error return self.create_error_response(str(e)) - result_generator = merge_async_iterators( - *generators, - is_cancelled=raw_request.is_disconnected if raw_request else None, - ) + result_generator = merge_async_iterators(*generators) num_prompts = len(engine_prompts) # Non-streaming response - final_res_batch: List[Optional[EmbeddingRequestOutput]] + final_res_batch: List[Optional[PoolingRequestOutput]] final_res_batch = [None] * num_prompts try: @@ -201,7 +200,7 @@ async def create_score( assert all(final_res is not None for final_res in final_res_batch) - final_res_batch_checked = cast(List[EmbeddingRequestOutput], + final_res_batch_checked = cast(List[PoolingRequestOutput], final_res_batch) response = request_output_to_score_response( diff --git a/vllm/entrypoints/openai/serving_tokenization.py b/vllm/entrypoints/openai/serving_tokenization.py index 59b3b1311f881..2e849333680d4 100644 --- a/vllm/entrypoints/openai/serving_tokenization.py +++ b/vllm/entrypoints/openai/serving_tokenization.py @@ -1,5 +1,7 @@ from typing import Final, List, Optional, Union +from fastapi import Request + from vllm.config import ModelConfig from vllm.engine.protocol import EngineClient from vllm.entrypoints.chat_utils import ChatTemplateContentFormatOption @@ -17,7 +19,6 @@ LoRAModulePath, OpenAIServing) from vllm.logger import init_logger -from vllm.utils import random_uuid logger = init_logger(__name__) @@ -48,12 +49,13 @@ def __init__( async def create_tokenize( self, request: TokenizeRequest, + raw_request: Request, ) -> Union[TokenizeResponse, ErrorResponse]: error_check_ret = await self._check_model(request) if error_check_ret is not None: return error_check_ret - request_id = f"tokn-{random_uuid()}" + request_id = f"tokn-{self._base_request_id(raw_request)}" try: ( @@ -81,12 +83,13 @@ async def create_tokenize( add_special_tokens=request.add_special_tokens, ) else: - request_prompts, engine_prompts = self._preprocess_completion( - request, - tokenizer, - request.prompt, - add_special_tokens=request.add_special_tokens, - ) + (request_prompts, + engine_prompts) = await self._preprocess_completion( + request, + tokenizer, + request.prompt, + add_special_tokens=request.add_special_tokens, + ) except ValueError as e: logger.exception("Error in preprocessing prompt inputs") return self.create_error_response(str(e)) @@ -111,12 +114,13 @@ async def create_tokenize( async def create_detokenize( self, request: DetokenizeRequest, + raw_request: Request, ) -> Union[DetokenizeResponse, ErrorResponse]: error_check_ret = await self._check_model(request) if error_check_ret is not None: return error_check_ret - request_id = f"tokn-{random_uuid()}" + request_id = f"tokn-{self._base_request_id(raw_request)}" ( lora_request, @@ 
-134,7 +138,7 @@ async def create_detokenize( # Silently ignore prompt adapter since it does not affect tokenization # (Unlike in Embeddings API where an error is raised) - prompt_input = self._tokenize_prompt_input( + prompt_input = await self._tokenize_prompt_input_async( request, tokenizer, request.tokens, diff --git a/vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py b/vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py index b5854ca39ab47..dae481a2154a1 100644 --- a/vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py +++ b/vllm/entrypoints/openai/tool_parsers/granite_tool_parser.py @@ -35,11 +35,13 @@ class GraniteToolParser(ToolParser): def __init__(self, tokenizer: AnyTokenizer): super().__init__(tokenizer) + self.bot_token = "<|tool_call|>" def extract_tool_calls( self, model_output: str, request: ChatCompletionRequest) -> ExtractedToolCallInformation: - stripped = model_output.strip() + # remove whitespace and the BOT token if it exists + stripped = model_output.strip().removeprefix(self.bot_token).lstrip() if not stripped or stripped[0] != '[': return ExtractedToolCallInformation(tools_called=False, tool_calls=[], @@ -86,7 +88,11 @@ def extract_tool_calls_streaming( ) -> Union[DeltaMessage, None]: start_idx = consume_space(0, current_text) - if not current_text or current_text[start_idx] != '[': + if current_text[start_idx:].startswith(self.bot_token): + start_idx = consume_space(start_idx + len(self.bot_token), + current_text) + if not current_text or start_idx >= len(current_text)\ + or current_text[start_idx] != '[': return DeltaMessage(content=delta_text) # bit mask flags for partial JSON parsing. If the name hasn't been diff --git a/vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py b/vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py index 18816cd665b3e..869d15ac359ea 100644 --- a/vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py +++ b/vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py @@ -91,7 +91,8 @@ def extract_tool_calls( function=FunctionCall( name=function_call["name"], # function call args are JSON but as a string - arguments=json.dumps(function_call["arguments"]))) + arguments=json.dumps(function_call["arguments"], + ensure_ascii=False))) for function_call in raw_function_calls ] @@ -139,13 +140,26 @@ def extract_tool_calls_streaming( self.tool_call_start_token_id) cur_tool_end_count = current_token_ids.count( self.tool_call_end_token_id) + tool_call_portion = None + text_portion = None # case: if we're generating text, OR rounding out a tool call if (cur_tool_start_count == cur_tool_end_count - and prev_tool_end_count == cur_tool_end_count): + and prev_tool_end_count == cur_tool_end_count + and self.tool_call_end_token not in delta_text): logger.debug("Generating text content! 
skipping tool parsing.") - if delta_text != self.tool_call_end_token: - return DeltaMessage(content=delta_text) + return DeltaMessage(content=delta_text) + + if self.tool_call_end_token in delta_text: + logger.debug("tool_call_end_token in delta_text") + full_text = current_text + delta_text + tool_call_portion = full_text.split( + self.tool_call_start_token)[-1].split( + self.tool_call_end_token)[0].rstrip() + delta_text = delta_text.split( + self.tool_call_end_token)[0].rstrip() + text_portion = delta_text.split( + self.tool_call_end_token)[-1].lstrip() # case: if tool open & close tag counts don't match, we're doing # imaginary "else" block here @@ -184,15 +198,21 @@ def extract_tool_calls_streaming( # case -- the current tool call is being closed. elif (cur_tool_start_count == cur_tool_end_count - and cur_tool_end_count > prev_tool_end_count): + and cur_tool_end_count >= prev_tool_end_count): + if (self.prev_tool_call_arr is None + or len(self.prev_tool_call_arr) == 0): + logger.debug( + "attempting to close tool call, but no tool call") + return None diff = self.prev_tool_call_arr[self.current_tool_id].get( "arguments") if diff: diff = diff.encode('utf-8').decode( 'unicode_escape') if diff is str else diff - diff = json.dumps( - diff, ensure_ascii=False - )[len(self.streamed_args_for_tool[self.current_tool_id]):] + if ('"}' not in delta_text): + return None + end_loc = delta_text.rindex('"}') + diff = delta_text[:end_loc] + '"}' logger.debug( "Finishing tool and found diff that had not " "been streamed yet: %s", diff) @@ -221,10 +241,15 @@ def extract_tool_calls_streaming( except partial_json_parser.core.exceptions.MalformedJSON: logger.debug('not enough tokens to parse into JSON yet') return None + except json.decoder.JSONDecodeError: + logger.debug("unable to parse JSON") + return None # case - we haven't sent the tool name yet. If it's available, send # it. otherwise, wait until it's available. if not self.current_tool_name_sent: + if (current_tool_call is None): + return None function_name: Union[str, None] = current_tool_call.get("name") if function_name: self.current_tool_name_sent = True @@ -284,13 +309,17 @@ def extract_tool_calls_streaming( # autocompleting the JSON elif cur_arguments and not prev_arguments: - cur_arguments_json = json.dumps(cur_arguments) + cur_arguments_json = json.dumps(cur_arguments, + ensure_ascii=False) logger.debug("finding %s in %s", delta_text, cur_arguments_json) # get the location where previous args differ from current - args_delta_start_loc = cur_arguments_json.index(delta_text) \ - + len(delta_text) + if (delta_text not in cur_arguments_json[:-2]): + return None + args_delta_start_loc = cur_arguments_json[:-2]. 
\ + rindex(delta_text) + \ + len(delta_text) # use that to find the actual delta arguments_delta = cur_arguments_json[:args_delta_start_loc] diff --git a/vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py b/vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py index 5caac84138e3b..bada805dd35b9 100644 --- a/vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +++ b/vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py @@ -19,7 +19,6 @@ extract_intermediate_diff) from vllm.logger import init_logger from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer -from vllm.utils import random_uuid logger = init_logger(__name__) @@ -109,7 +108,8 @@ def extract_tool_calls( function=FunctionCall( name=raw_function_call["name"], # function call args are JSON but as a string - arguments=json.dumps(raw_function_call["arguments"]))) + arguments=json.dumps(raw_function_call["arguments"], + ensure_ascii=False))) for raw_function_call in function_call_arr ] @@ -199,7 +199,7 @@ def extract_tool_calls_streaming( diff: Union[str, None] = current_tool_call.get("arguments") if diff: - diff = json.dumps(diff).replace( + diff = json.dumps(diff, ensure_ascii=False).replace( self.streamed_args_for_tool[self.current_tool_id], "") delta = DeltaMessage(tool_calls=[ @@ -232,7 +232,7 @@ def extract_tool_calls_streaming( delta = DeltaMessage(tool_calls=[ DeltaToolCall(index=self.current_tool_id, type="function", - id=f"chatcmpl-tool-{random_uuid()}", + id=MistralToolCall.generate_random_id(), function=DeltaFunctionCall( name=function_name).model_dump( exclude_none=True)) @@ -250,6 +250,8 @@ def extract_tool_calls_streaming( cur_arguments = current_tool_call.get("arguments") new_text = delta_text.replace("\'", "\"") + if ('"}' in new_text): + new_text = new_text[:new_text.rindex('"}')] if not cur_arguments and not prev_arguments: @@ -260,12 +262,15 @@ def extract_tool_calls_streaming( "mid-arguments") delta = None elif cur_arguments and not prev_arguments: - cur_arguments_json = json.dumps(cur_arguments) + cur_arguments_json = json.dumps(cur_arguments, + ensure_ascii=False)[:-2] logger.debug("finding %s in %s", new_text, cur_arguments_json) + if (new_text not in cur_arguments_json): + return None arguments_delta = cur_arguments_json[:cur_arguments_json. - index(new_text) + + rindex(new_text) + len(new_text)] logger.debug("First tokens in arguments received: %s", arguments_delta) @@ -279,8 +284,10 @@ def extract_tool_calls_streaming( self.current_tool_id] += arguments_delta elif cur_arguments and prev_arguments: - cur_args_json = json.dumps(cur_arguments) - prev_args_json = json.dumps(prev_arguments) + cur_args_json = json.dumps(cur_arguments, + ensure_ascii=False) + prev_args_json = json.dumps(prev_arguments, + ensure_ascii=False) logger.debug("Searching for diff between \n%s\n%s", cur_args_json, prev_args_json) diff --git a/vllm/entrypoints/utils.py b/vllm/entrypoints/utils.py new file mode 100644 index 0000000000000..e8a78d216d0f0 --- /dev/null +++ b/vllm/entrypoints/utils.py @@ -0,0 +1,57 @@ +import asyncio +import functools + +from fastapi import Request + + +async def listen_for_disconnect(request: Request) -> None: + """Returns if a disconnect message is received""" + while True: + message = await request.receive() + if message["type"] == "http.disconnect": + break + + +def with_cancellation(handler_func): + """Decorator that allows a route handler to be cancelled by client + disconnections. 
+ + This does _not_ use request.is_disconnected, which does not work with + middleware. Instead this follows the pattern from + starlette.StreamingResponse, which simultaneously awaits on two tasks: one + to wait for an http disconnect message, and the other to do the work that we + want done. When the first task finishes, the other is cancelled. + + A core assumption of this method is that the body of the request has already + been read. This is a safe assumption to make for fastapi handlers that have + already parsed the body of the request into a pydantic model for us. + This decorator is unsafe to use elsewhere, as it will consume and throw away + all incoming messages for the request while it looks for a disconnect + message. + + In the case where a `StreamingResponse` is returned by the handler, this + wrapper will stop listening for disconnects and instead the response object + will start listening for disconnects. + """ + + # Functools.wraps is required for this wrapper to appear to fastapi as a + # normal route handler, with the correct request type hinting. + @functools.wraps(handler_func) + async def wrapper(*args, **kwargs): + + # The request is either the second positional arg or `raw_request` + request = args[1] if len(args) > 1 else kwargs["raw_request"] + + handler_task = asyncio.create_task(handler_func(*args, **kwargs)) + cancellation_task = asyncio.create_task(listen_for_disconnect(request)) + + done, pending = await asyncio.wait([handler_task, cancellation_task], + return_when=asyncio.FIRST_COMPLETED) + for task in pending: + task.cancel() + + if handler_task in done: + return handler_task.result() + return None + + return wrapper diff --git a/vllm/envs.py b/vllm/envs.py index c896770e5f6bc..18870c1c6b51a 100644 --- a/vllm/envs.py +++ b/vllm/envs.py @@ -8,7 +8,6 @@ VLLM_RPC_BASE_PATH: str = tempfile.gettempdir() VLLM_USE_MODELSCOPE: bool = False VLLM_RINGBUFFER_WARNING_INTERVAL: int = 60 - VLLM_INSTANCE_ID: Optional[str] = None VLLM_NCCL_SO_PATH: Optional[str] = None LD_LIBRARY_PATH: Optional[str] = None VLLM_USE_TRITON_FLASH_ATTN: bool = False @@ -46,6 +45,7 @@ VLLM_USE_RAY_SPMD_WORKER: bool = False VLLM_USE_RAY_COMPILED_DAG: bool = False VLLM_USE_RAY_COMPILED_DAG_NCCL_CHANNEL: bool = True + VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM: bool = True VLLM_WORKER_MULTIPROC_METHOD: str = "fork" VLLM_ASSETS_CACHE: str = os.path.join(VLLM_CACHE_ROOT, "assets") VLLM_IMAGE_FETCH_TIMEOUT: int = 5 @@ -69,7 +69,9 @@ VLLM_SKIP_P2P_CHECK: bool = False VLLM_DISABLED_KERNELS: List[str] = [] VLLM_USE_V1: bool = False - VLLM_ENABLE_V1_MULTIPROCESSING: bool = False + VLLM_ENABLE_V1_MULTIPROCESSING: bool = True + VLLM_LOG_BATCHSIZE_INTERVAL: float = -1 + VLLM_DISABLE_COMPILE_CACHE: bool = False def get_default_cache_root(): @@ -113,7 +115,8 @@ def get_default_config_root(): # If set, vllm will use precompiled binaries (*.so) "VLLM_USE_PRECOMPILED": - lambda: bool(os.environ.get("VLLM_USE_PRECOMPILED")), + lambda: bool(os.environ.get("VLLM_USE_PRECOMPILED")) or bool( + os.environ.get("VLLM_PRECOMPILED_WHEEL_LOCATION")), # CMake build type # If not set, defaults to "Debug" or "RelWithDebInfo" @@ -174,11 +177,6 @@ def get_default_config_root(): "VLLM_USE_MODELSCOPE": lambda: os.environ.get("VLLM_USE_MODELSCOPE", "False").lower() == "true", - # Instance id represents an instance of the VLLM. All processes in the same - instance should have the same instance id. 
- "VLLM_INSTANCE_ID": - lambda: os.environ.get("VLLM_INSTANCE_ID", None), - # Interval in seconds to log a warning message when the ring buffer is full "VLLM_RINGBUFFER_WARNING_INTERVAL": lambda: int(os.environ.get("VLLM_RINGBUFFER_WARNING_INTERVAL", "60")), @@ -341,6 +339,13 @@ def get_default_config_root(): lambda: bool(int(os.getenv("VLLM_USE_RAY_COMPILED_DAG_NCCL_CHANNEL", "1")) ), + # If the env var is set, it enables GPU communication overlap in + # Ray's compiled DAG. This flag is ignored if + # VLLM_USE_RAY_COMPILED_DAG is not set. + "VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM": + lambda: bool(int(os.getenv("VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM", "1")) + ), + # Use dedicated multiprocess context for workers. # Both spawn and fork work "VLLM_WORKER_MULTIPROC_METHOD": @@ -456,7 +461,11 @@ def get_default_config_root(): # If set, enable multiprocessing in LLM for the V1 code path. "VLLM_ENABLE_V1_MULTIPROCESSING": - lambda: bool(int(os.getenv("VLLM_ENABLE_V1_MULTIPROCESSING", "0"))), + lambda: bool(int(os.getenv("VLLM_ENABLE_V1_MULTIPROCESSING", "1"))), + "VLLM_LOG_BATCHSIZE_INTERVAL": + lambda: float(os.getenv("VLLM_LOG_BATCHSIZE_INTERVAL", "-1")), + "VLLM_DISABLE_COMPILE_CACHE": + lambda: bool(int(os.getenv("VLLM_DISABLE_COMPILE_CACHE", "0"))), } # end-env-vars-definition diff --git a/vllm/executor/cpu_executor.py b/vllm/executor/cpu_executor.py index 336f9bc8efb20..2816b5c5c1f88 100644 --- a/vllm/executor/cpu_executor.py +++ b/vllm/executor/cpu_executor.py @@ -10,8 +10,7 @@ from vllm.model_executor.layers.sampler import SamplerOutput from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sequence import ExecuteModelRequest -from vllm.utils import (get_distributed_init_method, get_open_port, - get_vllm_instance_id, make_async) +from vllm.utils import get_distributed_init_method, get_open_port, make_async from vllm.worker.worker_base import WorkerWrapperBase logger = init_logger(__name__) @@ -23,7 +22,7 @@ class CPUExecutor(ExecutorBase): def _init_executor(self) -> None: assert self.device_config.device_type == "cpu" - # Reminder: Please update docs/source/serving/compatibility_matrix.rst + # Reminder: Please update docs/source/usage/compatibility_matrix.rst # If the feature combo become valid assert self.lora_config is None, "cpu backend doesn't support LoRA" @@ -31,9 +30,6 @@ def _init_executor(self) -> None: # Environment variables for CPU executor # - # Ensure that VLLM_INSTANCE_ID is set, to be inherited by workers - os.environ["VLLM_INSTANCE_ID"] = get_vllm_instance_id() - # Disable torch async compiling which won't work with daemonic processes os.environ["TORCHINDUCTOR_COMPILE_THREADS"] = "1" diff --git a/vllm/executor/multiproc_gpu_executor.py b/vllm/executor/multiproc_gpu_executor.py index a6c05a71d2b6f..fc58163cade64 100644 --- a/vllm/executor/multiproc_gpu_executor.py +++ b/vllm/executor/multiproc_gpu_executor.py @@ -3,25 +3,19 @@ from functools import partial from typing import Any, List, Optional -import torch - from vllm.executor.distributed_gpu_executor import ( # yapf: disable DistributedGPUExecutor, DistributedGPUExecutorAsync) from vllm.executor.gpu_executor import create_worker -from vllm.executor.multiproc_worker_utils import (ProcessWorkerWrapper, - ResultHandler, WorkerMonitor) +from vllm.executor.multiproc_worker_utils import ( + ProcessWorkerWrapper, ResultHandler, WorkerMonitor, + set_multiprocessing_worker_envs) from vllm.logger import init_logger from vllm.model_executor.layers.sampler import SamplerOutput from vllm.sequence import 
ExecuteModelRequest -from vllm.triton_utils.importing import HAS_TRITON from vllm.utils import (_run_task_with_lock, cuda_device_count_stateless, - cuda_is_initialized, get_distributed_init_method, - get_open_port, get_vllm_instance_id, make_async, + get_distributed_init_method, get_open_port, make_async, update_environment_variables) -if HAS_TRITON: - from vllm.triton_utils import maybe_set_triton_cache_manager - logger = init_logger(__name__) @@ -37,33 +31,8 @@ def _init_executor(self) -> None: world_size = self.parallel_config.world_size tensor_parallel_size = self.parallel_config.tensor_parallel_size - # Ensure that VLLM_INSTANCE_ID is set, to be inherited by workers - os.environ["VLLM_INSTANCE_ID"] = get_vllm_instance_id() - - # Disable torch async compiling which won't work with daemonic processes - os.environ["TORCHINDUCTOR_COMPILE_THREADS"] = "1" - - # Configure thread parallelism if OMP_NUM_THREADS isn't set - # - # Helps to avoid CPU contention. The default of spawning a thread per - # core combined with multiprocessing for each GPU can have a negative - # impact on performance. The contention is amplified when running in a - # container where CPU limits can cause throttling. - default_omp_num_threads = 1 - if "OMP_NUM_THREADS" not in os.environ and ( - current_parallelism := - torch.get_num_threads()) > default_omp_num_threads: - logger.warning( - "Reducing Torch parallelism from %d threads to %d to avoid " - "unnecessary CPU contention. Set OMP_NUM_THREADS in the " - "external environment to tune this value as needed.", - current_parallelism, default_omp_num_threads) - os.environ["OMP_NUM_THREADS"] = str(default_omp_num_threads) - torch.set_num_threads(default_omp_num_threads) - - # workaround for https://github.com/vllm-project/vllm/issues/6103 - if HAS_TRITON and world_size > 1: - maybe_set_triton_cache_manager() + # Set multiprocessing envs that are common to V0 and V1 + set_multiprocessing_worker_envs(self.parallel_config) # Multiprocessing-based executor does not support multi-node setting. # Since it only works for single node, we can use the loopback address @@ -125,13 +94,6 @@ def _check_executor_parameters(self): "CUDA_VISIBLE_DEVICES": (",".join(map(str, range(world_size)))) }) - if (cuda_is_initialized() - and os.environ.get("VLLM_WORKER_MULTIPROC_METHOD") != "spawn"): - logger.warning("CUDA was previously initialized. We must use " - "the `spawn` multiprocessing start method. Setting " - "VLLM_WORKER_MULTIPROC_METHOD to 'spawn'.") - os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn" - cuda_device_count = cuda_device_count_stateless() # Use confusing message for more common TP-only case. 
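+        # (i.e. the assertion below intentionally blames tensor_parallel_size
+        # rather than world_size, since TP-only deployments are the common
+        # case.)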
assert tensor_parallel_size <= cuda_device_count, ( diff --git a/vllm/executor/multiproc_worker_utils.py b/vllm/executor/multiproc_worker_utils.py index 884267d23dfc8..c4d90f0856f86 100644 --- a/vllm/executor/multiproc_worker_utils.py +++ b/vllm/executor/multiproc_worker_utils.py @@ -11,8 +11,15 @@ from typing import (Any, Callable, Dict, Generic, List, Optional, TextIO, TypeVar, Union) +import torch + import vllm.envs as envs from vllm.logger import init_logger +from vllm.triton_utils.importing import HAS_TRITON +from vllm.utils import cuda_is_initialized + +if HAS_TRITON: + from vllm.triton_utils import maybe_set_triton_cache_manager logger = init_logger(__name__) @@ -267,6 +274,49 @@ def write_with_prefix(s: str): file.write = write_with_prefix # type: ignore[method-assign] +def _check_multiproc_method(): + if (cuda_is_initialized() + and os.environ.get("VLLM_WORKER_MULTIPROC_METHOD") != "spawn"): + logger.warning("CUDA was previously initialized. We must use " + "the `spawn` multiprocessing start method. Setting " + "VLLM_WORKER_MULTIPROC_METHOD to 'spawn'. " + "See https://docs.vllm.ai/en/latest/getting_started/" + "debugging.html#python-multiprocessing " + "for more information.") + os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn" + + def get_mp_context(): + _check_multiproc_method() mp_method = envs.VLLM_WORKER_MULTIPROC_METHOD return multiprocessing.get_context(mp_method) + + +def set_multiprocessing_worker_envs(parallel_config): + """ Set up environment variables that should be used when there are workers + in a multiprocessing environment. This should be called by the parent + process before worker processes are created""" + + _check_multiproc_method() + + # Configure thread parallelism if OMP_NUM_THREADS isn't set + # + # Helps to avoid CPU contention. The default of spawning a thread per + # core combined with multiprocessing for each GPU can have a negative + # impact on performance. The contention is amplified when running in a + # container where CPU limits can cause throttling. + default_omp_num_threads = 1 + if "OMP_NUM_THREADS" not in os.environ and ( + current_parallelism := + torch.get_num_threads()) > default_omp_num_threads: + logger.warning( + "Reducing Torch parallelism from %d threads to %d to avoid " + "unnecessary CPU contention. 
Set OMP_NUM_THREADS in the " + "external environment to tune this value as needed.", + current_parallelism, default_omp_num_threads) + os.environ["OMP_NUM_THREADS"] = str(default_omp_num_threads) + torch.set_num_threads(default_omp_num_threads) + + # workaround for https://github.com/vllm-project/vllm/issues/6103 + if HAS_TRITON and parallel_config.world_size > 1: + maybe_set_triton_cache_manager() diff --git a/vllm/executor/neuron_executor.py b/vllm/executor/neuron_executor.py index 31e6fdc3ab1bb..a9efc4f9a801c 100644 --- a/vllm/executor/neuron_executor.py +++ b/vllm/executor/neuron_executor.py @@ -29,11 +29,13 @@ def _init_worker(self): wrapper = WorkerWrapperBase(vllm_config=self.vllm_config) distributed_init_method = get_distributed_init_method( get_ip(), get_open_port()) - self.driver_worker = wrapper.init_worker( + wrapper.init_worker( vllm_config=self.vllm_config, local_rank=0, rank=0, - distributed_init_method=distributed_init_method) + distributed_init_method=distributed_init_method, + ) + self.driver_worker = wrapper.worker self.driver_worker.init_device() self.driver_worker.load_model() diff --git a/vllm/executor/openvino_executor.py b/vllm/executor/openvino_executor.py index db0070ce510ee..057a32364e512 100644 --- a/vllm/executor/openvino_executor.py +++ b/vllm/executor/openvino_executor.py @@ -36,7 +36,7 @@ def _init_worker(self): distributed_init_method = get_distributed_init_method( get_ip(), get_open_port()) - self.driver_worker = wrapper.init_worker( + wrapper.init_worker( ov_core=ov.Core(), vllm_config=self.vllm_config, local_rank=0, @@ -45,6 +45,7 @@ def _init_worker(self): kv_cache_dtype=self.cache_config.cache_dtype, is_driver_worker=True, ) + self.driver_worker = wrapper.worker self.driver_worker.init_device() self.driver_worker.load_model() diff --git a/vllm/executor/ray_gpu_executor.py b/vllm/executor/ray_gpu_executor.py index 6542b18ae70b1..4bf5cbbd18ffe 100644 --- a/vllm/executor/ray_gpu_executor.py +++ b/vllm/executor/ray_gpu_executor.py @@ -15,8 +15,7 @@ from vllm.model_executor.layers.sampler import SamplerOutput from vllm.sequence import ExecuteModelRequest from vllm.utils import (_run_task_with_lock, get_distributed_init_method, - get_ip, get_open_port, get_vllm_instance_id, - make_async) + get_ip, get_open_port, make_async) if ray is not None: from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy @@ -189,8 +188,14 @@ def sort_by_driver_then_worker_ip(worker): self.workers = sorted(self.workers, key=sort_by_driver_then_worker_ip) # Get the set of GPU IDs used on each node. - worker_node_and_gpu_ids = self._run_workers("get_node_and_gpu_ids", - use_dummy_driver=True) + worker_node_and_gpu_ids = [] + for worker in [self.driver_dummy_worker] + self.workers: + if worker is None: + # driver_dummy_worker can be None when using ray spmd worker. + continue + worker_node_and_gpu_ids.append( + ray.get(worker.get_node_and_gpu_ids.remote()) \ + ) # type: ignore node_workers = defaultdict(list) # node id -> list of worker ranks node_gpus = defaultdict(list) # node id -> list of gpu ids @@ -220,14 +225,10 @@ def sort_by_driver_then_worker_ip(worker): " environment variable, make sure it is unique for" " each node.") - VLLM_INSTANCE_ID = get_vllm_instance_id() - # Set environment variables for the driver and workers. 
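+        # Illustrative example with assumed values: if node_gpus[node_id] is
+        # [0, 1, 2, 3], every worker on that node gets
+        # CUDA_VISIBLE_DEVICES="0,1,2,3".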
all_args_to_update_environment_variables = [({ "CUDA_VISIBLE_DEVICES": ",".join(map(str, node_gpus[node_id])), - "VLLM_INSTANCE_ID": - VLLM_INSTANCE_ID, "VLLM_TRACE_FUNCTION": str(envs.VLLM_TRACE_FUNCTION), **({ @@ -334,7 +335,6 @@ def _run_workers( async_run_tensor_parallel_workers_only: bool = False, all_args: Optional[List[Tuple[Any, ...]]] = None, all_kwargs: Optional[List[Dict[str, Any]]] = None, - use_dummy_driver: bool = False, max_concurrent_workers: Optional[int] = None, **kwargs, ) -> Any: @@ -394,18 +394,10 @@ def _run_workers( driver_kwargs = kwargs if all_kwargs is None else all_kwargs[0] # Start the driver worker after all the ray workers. - if not use_dummy_driver: - driver_worker_output = [ - self.driver_worker.execute_method(method, *driver_args, - **driver_kwargs) - ] - else: - assert self.driver_dummy_worker is not None - driver_worker_output = [ - ray.get( - self.driver_dummy_worker.execute_method.remote( - method, *driver_args, **driver_kwargs)) - ] + driver_worker_output = [ + self.driver_worker.execute_method(method, *driver_args, + **driver_kwargs) + ] # Get the results of the ray workers. if self.workers: @@ -422,12 +414,10 @@ def _check_ray_adag_installation(self): import pkg_resources from packaging import version - required_version = version.parse("2.35") + required_version = version.parse("2.40") current_version = version.parse( pkg_resources.get_distribution("ray").version) - # TODO: update the constraint once we adapt to the backward - # incompatible API change from ray 2.36 - if current_version != required_version: + if current_version < required_version: raise ValueError(f"Ray version {required_version} is " f"required, but found {current_version}") @@ -453,6 +443,8 @@ def _compiled_ray_dag(self, enable_asyncio: bool): logger.info("VLLM_USE_RAY_COMPILED_DAG_NCCL_CHANNEL = %s", envs.VLLM_USE_RAY_COMPILED_DAG_NCCL_CHANNEL) + logger.info("VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM = %s", + envs.VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM) with InputNode() as input_data: # Example DAG: PP=2, TP=4 # (ExecuteModelReq, None) -> 0 -> (ExecuteModelReq, IntermediateOutput) -> 4 -> SamplerOutput # noqa: E501 @@ -488,7 +480,10 @@ def _compiled_ray_dag(self, enable_asyncio: bool): forward_dag = MultiOutputNode(outputs) - return forward_dag.experimental_compile(enable_asyncio=enable_asyncio) + return forward_dag.experimental_compile( + enable_asyncio=enable_asyncio, + _overlap_gpu_communication=envs. 
+ VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM) def __del__(self): self.shutdown() @@ -515,8 +510,8 @@ async def execute_model_async( serialized_data = self.input_encoder.encode(execute_model_req) dag_future = await self.forward_dag.execute_async(serialized_data) - outputs = await dag_future - return self.output_decoder.decode(outputs[0]) + output = await dag_future[0] + return self.output_decoder.decode(output) async def _driver_execute_model_async( self, diff --git a/vllm/executor/ray_hpu_executor.py b/vllm/executor/ray_hpu_executor.py index a74328e5aa272..f3025cb537ab8 100644 --- a/vllm/executor/ray_hpu_executor.py +++ b/vllm/executor/ray_hpu_executor.py @@ -15,8 +15,7 @@ from vllm.model_executor.layers.sampler import SamplerOutput from vllm.sequence import ExecuteModelRequest from vllm.utils import (_run_task_with_lock, get_distributed_init_method, - get_ip, get_open_port, get_vllm_instance_id, - make_async) + get_ip, get_open_port, make_async) if ray is not None: from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy @@ -164,9 +163,14 @@ def sort_by_driver_then_worker_ip(worker): # node will be placed first. self.workers = sorted(self.workers, key=sort_by_driver_then_worker_ip) - # Get the set of GPU IDs used on each node. - worker_node_and_gpu_ids = self._run_workers("get_node_and_gpu_ids", - use_dummy_driver=True) + worker_node_and_gpu_ids = [] + for worker in [self.driver_dummy_worker] + self.workers: + if worker is None: + # driver_dummy_worker can be None when using ray spmd worker. + continue + worker_node_and_gpu_ids.append( + ray.get(worker.get_node_and_gpu_ids.remote()) \ + ) # type: ignore node_workers = defaultdict(list) # node id -> list of worker ranks node_gpus = defaultdict(list) # node id -> list of gpu ids @@ -196,12 +200,8 @@ def sort_by_driver_then_worker_ip(worker): "environment variable, make sure it is unique for" " each node.") - VLLM_INSTANCE_ID = get_vllm_instance_id() - # Set environment variables for the driver and workers. all_args_to_update_environment_variables = [({ - "VLLM_INSTANCE_ID": - VLLM_INSTANCE_ID, "VLLM_TRACE_FUNCTION": str(envs.VLLM_TRACE_FUNCTION), }, ) for (node_id, _) in worker_node_and_gpu_ids] @@ -301,7 +301,6 @@ def _run_workers( async_run_tensor_parallel_workers_only: bool = False, all_args: Optional[List[Tuple[Any, ...]]] = None, all_kwargs: Optional[List[Dict[str, Any]]] = None, - use_dummy_driver: bool = False, max_concurrent_workers: Optional[int] = None, **kwargs, ) -> Any: @@ -361,18 +360,10 @@ def _run_workers( driver_kwargs = kwargs if all_kwargs is None else all_kwargs[0] # Start the driver worker after all the ray workers. - if not use_dummy_driver: - driver_worker_output = [ - self.driver_worker.execute_method(method, *driver_args, - **driver_kwargs) - ] - else: - assert self.driver_dummy_worker is not None - driver_worker_output = [ - ray.get( - self.driver_dummy_worker.execute_method.remote( - method, *driver_args, **driver_kwargs)) - ] + driver_worker_output = [ + self.driver_worker.execute_method(method, *driver_args, + **driver_kwargs) + ] # Get the results of the ray workers. 
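The GPU and HPU executors above now gather per-node device IDs the same way: walk the dummy driver plus the remote workers and ray.get each actor call directly, rather than threading a use_dummy_driver flag through _run_workers. A hedged sketch of the pattern (the worker handles and get_node_and_gpu_ids method are vLLM's; the standalone function framing is illustrative):

import ray


def collect_node_and_gpu_ids(driver_dummy_worker, workers):
    results = []
    for worker in [driver_dummy_worker] + workers:
        if worker is None:
            # The dummy driver is absent when Ray SPMD workers are enabled.
            continue
        # Each .remote() call returns an ObjectRef; ray.get blocks until
        # the actor has replied.
        results.append(ray.get(worker.get_node_and_gpu_ids.remote()))
    return results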
if self.workers: diff --git a/vllm/executor/ray_tpu_executor.py b/vllm/executor/ray_tpu_executor.py index c227b5e283c68..5118c13934f0d 100644 --- a/vllm/executor/ray_tpu_executor.py +++ b/vllm/executor/ray_tpu_executor.py @@ -13,7 +13,7 @@ from vllm.model_executor.layers.sampler import SamplerOutput from vllm.sequence import ExecuteModelRequest from vllm.utils import (get_distributed_init_method, get_ip, get_open_port, - get_vllm_instance_id, make_async) + make_async) if ray is not None: from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy @@ -137,19 +137,21 @@ def sort_by_driver_then_worker_ip(worker): self.workers = sorted(self.workers, key=sort_by_driver_then_worker_ip) # Get the set of TPU IDs used on each node. - worker_node_and_gpu_ids = self._run_workers("get_node_and_gpu_ids", - use_dummy_driver=True) + worker_node_and_gpu_ids = [] + for worker in [self.driver_dummy_worker] + self.workers: + if worker is None: + # driver_dummy_worker can be None when using ray spmd worker. + continue + worker_node_and_gpu_ids.append( + ray.get(worker.get_node_and_gpu_ids.remote()) \ + ) # type: ignore node_workers = defaultdict(list) for i, (node_id, _) in enumerate(worker_node_and_gpu_ids): node_workers[node_id].append(i) - VLLM_INSTANCE_ID = get_vllm_instance_id() - # Set environment variables for the driver and workers. all_args_to_update_environment_variables = [({ - "VLLM_INSTANCE_ID": - VLLM_INSTANCE_ID, "VLLM_TRACE_FUNCTION": str(envs.VLLM_TRACE_FUNCTION), }, ) for _ in worker_node_and_gpu_ids] @@ -203,7 +205,6 @@ def _run_workers( async_run_remote_workers_only: bool = False, all_args: Optional[List[Tuple[Any, ...]]] = None, all_kwargs: Optional[List[Dict[str, Any]]] = None, - use_dummy_driver: bool = False, max_concurrent_workers: Optional[int] = None, use_ray_compiled_dag: bool = False, **kwargs, @@ -245,14 +246,8 @@ def _run_workers( driver_kwargs = kwargs if all_kwargs is None else all_kwargs[0] # Start the driver worker after all the ray workers. - if not use_dummy_driver: - driver_worker_output = self.driver_worker.execute_method( - method, *driver_args, **driver_kwargs) - else: - assert self.driver_dummy_worker is not None - driver_worker_output = ray.get( - self.driver_dummy_worker.execute_method.remote( - method, *driver_args, **driver_kwargs)) + driver_worker_output = self.driver_worker.execute_method( + method, *driver_args, **driver_kwargs) # Get the results of the ray workers. if self.workers: ray_worker_outputs = ray.get(ray_worker_outputs) diff --git a/vllm/executor/ray_utils.py b/vllm/executor/ray_utils.py index 4f28efd639084..426aa1b5c728f 100644 --- a/vllm/executor/ray_utils.py +++ b/vllm/executor/ray_utils.py @@ -277,10 +277,14 @@ def initialize_ray_cluster( f"Total number of devices: {device_bundles}.") else: num_devices_in_cluster = ray.cluster_resources().get(device_str, 0) + # Log a warning message and delay resource allocation failure response. 
+    # Avoid rejecting immediately, so that a user-initiated placement
+    # group can still be created and the cluster has time to become ready.
     if parallel_config.world_size > num_devices_in_cluster:
-        raise ValueError(
-            f"The number of required {device_str}s exceeds the total "
-            f"number of available {device_str}s in the placement group.")
+        logger.warning(
+            "The number of required %ss exceeds the total "
+            "number of available %ss in the placement group.", device_str,
+            device_str)

     # Create a new placement group
     placement_group_specs: List[Dict[str, float]] = ([{
         device_str: 1.0
diff --git a/vllm/executor/ray_xpu_executor.py b/vllm/executor/ray_xpu_executor.py
index 2b1cdc09b0a9f..d2086f5fef26c 100644
--- a/vllm/executor/ray_xpu_executor.py
+++ b/vllm/executor/ray_xpu_executor.py
@@ -1,11 +1,13 @@
 import asyncio
 from typing import List, Optional

+import ray
+
 import vllm.envs as envs
 from vllm.executor.ray_gpu_executor import RayGPUExecutor, RayGPUExecutorAsync
 from vllm.executor.xpu_executor import XPUExecutor
 from vllm.logger import init_logger
-from vllm.utils import get_vllm_instance_id, make_async
+from vllm.utils import make_async

 logger = init_logger(__name__)

@@ -14,15 +16,16 @@ class RayXPUExecutor(RayGPUExecutor, XPUExecutor):

     def _get_env_vars_to_be_updated(self):
         # Get the set of GPU IDs used on each node.
-        worker_node_and_gpu_ids = self._run_workers("get_node_and_gpu_ids",
-                                                    use_dummy_driver=True)
-
-        VLLM_INSTANCE_ID = get_vllm_instance_id()
+        worker_node_and_gpu_ids = []
+        for worker in [self.driver_dummy_worker] + self.workers:
+            if worker is None:
+                # driver_dummy_worker can be None when using ray spmd worker.
+                continue
+            worker_node_and_gpu_ids.append(
+                ray.get(worker.get_node_and_gpu_ids.remote()))  # type: ignore

         # Set environment variables for the driver and workers.
         all_args_to_update_environment_variables = [({
-            "VLLM_INSTANCE_ID":
-            VLLM_INSTANCE_ID,
             "VLLM_TRACE_FUNCTION":
             str(envs.VLLM_TRACE_FUNCTION),
         }, ) for (_, _) in worker_node_and_gpu_ids]
diff --git a/vllm/forward_context.py b/vllm/forward_context.py
index aaa3e4bb3a1e8..7f56575279e9b 100644
--- a/vllm/forward_context.py
+++ b/vllm/forward_context.py
@@ -1,8 +1,22 @@
+import time
+from collections import defaultdict
 from contextlib import contextmanager
 from dataclasses import dataclass
 from typing import Any, Dict, Optional

+import torch
+
+import vllm.envs as envs
 from vllm.config import VllmConfig
+from vllm.logger import init_logger
+
+logger = init_logger(__name__)
+
+track_batchsize: bool = envs.VLLM_LOG_BATCHSIZE_INTERVAL >= 0
+last_logging_time: float = 0
+forward_start_time: float = 0
+batchsize_logging_interval: float = envs.VLLM_LOG_BATCHSIZE_INTERVAL
+batchsize_forward_time: defaultdict = defaultdict(list)


 @dataclass
@@ -26,7 +40,13 @@ def get_forward_context() -> ForwardContext:
 @contextmanager
 def set_forward_context(context: Any, vllm_config: VllmConfig):
     """A context manager that stores the current forward context,
-    can be attention metadata, etc."""
+    can be attention metadata, etc.
+    Here we can inject common logic for every model forward pass.
+    """
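The bookkeeping injected below times every forward pass, buckets the latency by batch size, and logs the per-bucket median at most once per interval. A toy version of the same logic, assuming the caller records a perf_counter() start timestamp (the interval constant and names are illustrative, not vLLM's):

import time
from collections import defaultdict
from statistics import median

forward_ms = defaultdict(list)  # batch size -> forward times in ms
last_log = 0.0
LOG_INTERVAL_S = 30.0  # stand-in for VLLM_LOG_BATCHSIZE_INTERVAL


def record_forward(batchsize: int, start: float) -> None:
    global last_log
    now = time.perf_counter()
    forward_ms[batchsize].append((now - start) * 1000)
    if now - last_log > LOG_INTERVAL_S:
        last_log = now
        # Skip single-sample buckets (likely cudagraph capture/profiling).
        stats = sorted(((bs, len(ts), round(median(ts), 2))
                        for bs, ts in forward_ms.items() if len(ts) > 1),
                       key=lambda x: x[1], reverse=True)
        print("batchsize, count, median_ms:", stats)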
+ """ + global forward_start_time + need_to_track_batchsize = track_batchsize and context is not None + if need_to_track_batchsize: + forward_start_time = time.perf_counter() global _forward_context prev_context = _forward_context _forward_context = ForwardContext( @@ -36,4 +56,37 @@ def set_forward_context(context: Any, vllm_config: VllmConfig): try: yield finally: + global batchsize_counter + global last_logging_time, batchsize_logging_interval + if need_to_track_batchsize: + if hasattr(context, "num_prefill_tokens"): + # for v0 attention backends + batchsize = context.num_prefill_tokens + \ + context.num_decode_tokens + else: + # for v1 attention backends + batchsize = context.num_input_tokens + # we use synchronous scheduling right now, + # adding a sync point here should not affect + # scheduling of the next batch + torch.cuda.synchronize() + now = time.perf_counter() + # time measurement is in milliseconds + batchsize_forward_time[batchsize].append( + (now - forward_start_time) * 1000) + if now - last_logging_time > batchsize_logging_interval: + last_logging_time = now + forward_stats = [] + for bs, times in batchsize_forward_time.items(): + if len(times) <= 1: + # can be cudagraph / profiling run + continue + medium = torch.quantile(torch.tensor(times), q=0.5).item() + medium = round(medium, 2) + forward_stats.append((bs, len(times), medium)) + forward_stats.sort(key=lambda x: x[1], reverse=True) + if forward_stats: + logger.info(("Batchsize forward time stats " + "(batchsize, count, median_time(ms)): %s"), + forward_stats) _forward_context = prev_context diff --git a/vllm/inputs/__init__.py b/vllm/inputs/__init__.py index 54fbd7a321a6f..d4402e77a3886 100644 --- a/vllm/inputs/__init__.py +++ b/vllm/inputs/__init__.py @@ -38,34 +38,3 @@ "InputProcessingContext", "InputRegistry", ] - - -def __getattr__(name: str): - import warnings - - if name == "PromptInput": - msg = ("PromptInput has been renamed to PromptType. " - "The original name will be removed in an upcoming version.") - - warnings.warn(DeprecationWarning(msg), stacklevel=2) - - return PromptType - - if name == "LLMInputs": - msg = ("LLMInputs has been renamed to DecoderOnlyInputs. " - "The original name will be removed in an upcoming version.") - - warnings.warn(DeprecationWarning(msg), stacklevel=2) - - return DecoderOnlyInputs - - if name == "EncoderDecoderLLMInputs": - msg = ( - "EncoderDecoderLLMInputs has been renamed to EncoderDecoderInputs. " - "The original name will be removed in an upcoming version.") - - warnings.warn(DeprecationWarning(msg), stacklevel=2) - - return EncoderDecoderInputs - - raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/vllm/inputs/data.py b/vllm/inputs/data.py index fb7dbbebd7b90..d54cbb5c37819 100644 --- a/vllm/inputs/data.py +++ b/vllm/inputs/data.py @@ -7,7 +7,8 @@ from typing_extensions import NotRequired, TypedDict, TypeVar, assert_never if TYPE_CHECKING: - from vllm.multimodal import MultiModalDataDict, MultiModalPlaceholderDict + from vllm.multimodal import (MultiModalDataDict, MultiModalKwargs, + MultiModalPlaceholderDict) from vllm.multimodal.inputs import MultiModalInputsV2 @@ -150,11 +151,22 @@ class TokenInputs(TypedDict): if the model supports it. """ + multi_modal_inputs: NotRequired["MultiModalKwargs"] + """ + Optional multi-modal inputs to pass to the model, + if the model supports it. + """ + multi_modal_placeholders: NotRequired["MultiModalPlaceholderDict"] """ Placeholder ranges for the multi-modal data. 
""" + multi_modal_hashes: NotRequired[List[str]] + """ + The hashes of the multi-modal data. + """ + mm_processor_kwargs: NotRequired[Dict[str, Any]] """ Optional multi-modal processor kwargs to be forwarded to the @@ -169,6 +181,8 @@ def token_inputs( token_type_ids: Optional[List[int]] = None, prompt: Optional[str] = None, multi_modal_data: Optional["MultiModalDataDict"] = None, + multi_modal_inputs: Optional["MultiModalKwargs"] = None, + multi_modal_hashes: Optional[List[str]] = None, multi_modal_placeholders: Optional["MultiModalPlaceholderDict"] = None, mm_processor_kwargs: Optional[Dict[str, Any]] = None, ) -> TokenInputs: @@ -181,6 +195,10 @@ def token_inputs( inputs["token_type_ids"] = token_type_ids if multi_modal_data is not None: inputs["multi_modal_data"] = multi_modal_data + if multi_modal_inputs is not None: + inputs["multi_modal_inputs"] = multi_modal_inputs + if multi_modal_hashes is not None: + inputs["multi_modal_hashes"] = multi_modal_hashes if multi_modal_placeholders is not None: inputs["multi_modal_placeholders"] = multi_modal_placeholders if mm_processor_kwargs is not None: @@ -273,6 +291,30 @@ def multi_modal_data(self) -> "MultiModalDataDict": assert_never(inputs) + @cached_property + def multi_modal_inputs(self) -> Union[Dict, "MultiModalKwargs"]: + inputs = self.inputs + + if inputs["type"] == "token": + return inputs.get("multi_modal_inputs", {}) + + if inputs["type"] == "multimodal": + return inputs.get("mm_kwargs", {}) + + assert_never(inputs) + + @cached_property + def multi_modal_hashes(self) -> List[str]: + inputs = self.inputs + + if inputs["type"] == "token": + return inputs.get("multi_modal_hashes", []) + + if inputs["type"] == "multimodal": + return inputs.get("mm_hashes", []) + + assert_never(inputs) + @cached_property def multi_modal_placeholders(self) -> "MultiModalPlaceholderDict": inputs = self.inputs @@ -358,34 +400,3 @@ def to_enc_dec_tuple_list( return [(enc_dec_prompt["encoder_prompt"], enc_dec_prompt["decoder_prompt"]) for enc_dec_prompt in enc_dec_prompts] - - -def __getattr__(name: str): - import warnings - - if name == "PromptInput": - msg = ("PromptInput has been renamed to PromptType. " - "The original name will be removed in an upcoming version.") - - warnings.warn(DeprecationWarning(msg), stacklevel=2) - - return PromptType - - if name == "LLMInputs": - msg = ("LLMInputs has been renamed to DecoderOnlyInputs. " - "The original name will be removed in an upcoming version.") - - warnings.warn(DeprecationWarning(msg), stacklevel=2) - - return DecoderOnlyInputs - - if name == "EncoderDecoderLLMInputs": - msg = ( - "EncoderDecoderLLMInputs has been renamed to EncoderDecoderInputs. 
" - "The original name will be removed in an upcoming version.") - - warnings.warn(DeprecationWarning(msg), stacklevel=2) - - return EncoderDecoderInputs - - raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/vllm/inputs/registry.py b/vllm/inputs/registry.py index 68b4756331e6d..d34784d8713cf 100644 --- a/vllm/inputs/registry.py +++ b/vllm/inputs/registry.py @@ -2,17 +2,18 @@ from collections import UserDict from dataclasses import dataclass from typing import (TYPE_CHECKING, Any, Callable, Dict, Mapping, NamedTuple, - Optional, Protocol, Type, cast) + Optional, Protocol, Type) from torch import nn -from transformers import PretrainedConfig, ProcessorMixin +from transformers.configuration_utils import PretrainedConfig +from transformers.processing_utils import ProcessorMixin from typing_extensions import TypeVar, assert_never from vllm.logger import init_logger from vllm.transformers_utils.processor import cached_get_processor from vllm.transformers_utils.tokenizer import AnyTokenizer -from vllm.utils import (get_allowed_kwarg_only_overrides, print_warning_once, - resolve_mm_processor_kwargs) +from vllm.utils import (ClassRegistry, get_allowed_kwarg_only_overrides, + print_warning_once, resolve_mm_processor_kwargs) from .data import ProcessorInputs, SingletonInputs from .parse import is_encoder_decoder_inputs @@ -47,7 +48,6 @@ def get_hf_config(self, hf_config_type: Type[C] = PretrainedConfig) -> C: Raises: TypeError: If the model is not of the specified type. """ - hf_config = self.model_config.hf_config if not isinstance(hf_config, hf_config_type): raise TypeError("Invalid type of HuggingFace config. " @@ -60,20 +60,69 @@ def get_hf_image_processor_config(self) -> Dict[str, Any]: """ Get the HuggingFace image processor configuration of the model. """ - return self.model_config.hf_image_processor_config + def get_mm_config(self): + """ + Get the multimodal config of the model. + + Raises: + RuntimeError: If the model is not a multimodal model. 
+ """ + mm_config = self.model_config.multimodal_config + if mm_config is None: + raise RuntimeError("Not a multimodal model") + + return mm_config + + def get_hf_processor(self, **kwargs: object) -> ProcessorMixin: + base_kwargs = self.model_config.mm_processor_kwargs + if base_kwargs is None: + base_kwargs = {} + + merged_kwargs = {**base_kwargs, **kwargs} + + return cached_get_processor( + self.model_config.model, + trust_remote_code=self.model_config.trust_remote_code, + **merged_kwargs, + ) + @dataclass(frozen=True) class InputProcessingContext(InputContext): tokenizer: AnyTokenizer """The tokenizer used to tokenize the inputs.""" - def get_hf_processor(self) -> ProcessorMixin: + def get_hf_processor(self, **kwargs: object) -> ProcessorMixin: + base_kwargs = self.model_config.mm_processor_kwargs + if base_kwargs is None: + base_kwargs = {} + + merged_kwargs = {**base_kwargs, **kwargs} + return cached_get_processor( - self.model_config.tokenizer, + self.model_config.model, tokenizer=self.tokenizer, # Override the tokenizer with ours trust_remote_code=self.model_config.trust_remote_code, + **merged_kwargs, + ) + + def resolve_hf_processor_call_kwargs( + self, + hf_processor: ProcessorMixin, + inference_kwargs: Mapping[str, object], + ) -> Mapping[str, object]: + assert callable(hf_processor) + + base_kwargs = self.model_config.mm_processor_kwargs + if base_kwargs is None: + base_kwargs = {} + + return resolve_mm_processor_kwargs( + base_kwargs, + inference_kwargs, + hf_processor, ) @@ -136,12 +185,12 @@ class InputRegistry: """ def __init__(self) -> None: - self._dummy_factories_by_model_type: Dict[Type[nn.Module], - DummyDataFactory] = {} - self._dummy_encoder_factories_by_model_type: Dict[ - Type[nn.Module], DummyDataFactory] = {} - self._input_processors_by_model_type: Dict[Type[nn.Module], - InputProcessor] = {} + self._dummy_factories_by_model_type = \ + ClassRegistry[nn.Module, DummyDataFactory]() + self._dummy_encoder_factories_by_model_type = \ + ClassRegistry[nn.Module, DummyDataFactory]() + self._input_processors_by_model_type = \ + ClassRegistry[nn.Module, InputProcessor]() def _default_dummy_data_factory( self, @@ -171,7 +220,8 @@ def register_dummy_data(self, factory: DummyDataFactory): """ def wrapper(model_cls: N) -> N: - if model_cls in self._dummy_factories_by_model_type: + if self._dummy_factories_by_model_type.contains(model_cls, + strict=True): logger.warning( "Model class %s already has dummy data " "registered to %s. It is overwritten by the new one.", @@ -195,7 +245,8 @@ def register_dummy_encoder_data(self, factory: DummyDataFactory): """ def wrapper(model_cls: N) -> N: - if model_cls in self._dummy_encoder_factories_by_model_type: + if self._dummy_encoder_factories_by_model_type.contains( + model_cls, strict=True): logger.warning( "Model class %s already has dummy encoder data " "registered to %s. 
It is overwritten by the new one.", @@ -232,19 +283,35 @@ def dummy_data_for_profiling( """ # Avoid circular import from vllm.model_executor.model_loader import get_model_architecture - - model_cls, _ = get_model_architecture(model_config) - if is_encoder_data: - dummy_factory = self._get_dummy_encoder_data_factory(model_cls) + from vllm.multimodal import MultiModalKwargs + from vllm.multimodal.utils import cached_get_tokenizer + + if mm_registry.has_processor(model_config): + tokenizer = cached_get_tokenizer( + model_config.tokenizer, + trust_remote_code=model_config.trust_remote_code, + ) + processor = mm_registry.create_processor(model_config, tokenizer) + + mm_counts = mm_registry.get_mm_limits_per_prompt(model_config) + mm_max_tokens = mm_registry.get_max_tokens_by_modality( + model_config) + + dummy_data = processor.get_dummy_data(seq_len, mm_counts, + mm_max_tokens) else: - dummy_factory = self._get_dummy_data_factory(model_cls) - mm_counts = mm_registry.get_mm_limits_per_prompt(model_config) - mm_processor_kwargs = get_allowed_kwarg_only_overrides( - dummy_factory, overrides=model_config.mm_processor_kwargs) + model_cls, _ = get_model_architecture(model_config) + if is_encoder_data: + dummy_factory = self._get_dummy_encoder_data_factory(model_cls) + else: + dummy_factory = self._get_dummy_data_factory(model_cls) + mm_counts = mm_registry.get_mm_limits_per_prompt(model_config) + mm_processor_kwargs = get_allowed_kwarg_only_overrides( + dummy_factory, overrides=model_config.mm_processor_kwargs) - dummy_data = dummy_factory(InputContext(model_config), seq_len, - _MultiModalCounts(mm_counts), - **mm_processor_kwargs) + dummy_data = dummy_factory(InputContext(model_config), seq_len, + _MultiModalCounts(mm_counts), + **mm_processor_kwargs) # Having more tokens is over-conservative but otherwise fine num_tokens = dummy_data.seq_data.prompt_token_ids @@ -257,7 +324,9 @@ def dummy_data_for_profiling( raise AssertionError( f"Expected at least {seq_len} dummy tokens for profiling, " f"but found {len(num_tokens)} tokens instead.") - if dummy_data.multi_modal_data is not None: + + if (dummy_data.multi_modal_data is not None and + not isinstance(dummy_data.multi_modal_data, MultiModalKwargs)): for k, v in dummy_data.multi_modal_data.items(): num_items = len(v) if isinstance(v, list) else 1 num_expected = mm_counts[k] @@ -287,7 +356,8 @@ def register_input_processor(self, processor: InputProcessor): """ def wrapper(model_cls: N) -> N: - if model_cls in self._input_processors_by_model_type: + if self._input_processors_by_model_type.contains(model_cls, + strict=True): logger.warning( "Model class %s already has input processor " "registered to %s. 
It is overwritten by the new one.", @@ -339,7 +409,7 @@ def process_input(self, model_config: "ModelConfig", # If it's empty, it'll fall back to the default kwarg values mm_processor_kwargs = resolve_mm_processor_kwargs( model_config.mm_processor_kwargs, - cast(Dict[str, Any], inputs.get("mm_processor_kwargs")), + inputs.get("mm_processor_kwargs", {}), # type: ignore processor, ) diff --git a/vllm/lora/fully_sharded_layers.py b/vllm/lora/fully_sharded_layers.py index f5c2eced9d2bb..545ec21ca74c1 100644 --- a/vllm/lora/fully_sharded_layers.py +++ b/vllm/lora/fully_sharded_layers.py @@ -1,5 +1,5 @@ # pylint: disable=unused-argument -from typing import TYPE_CHECKING, List, Optional, Union +from typing import TYPE_CHECKING, List, Optional, Tuple, Union, cast import torch import torch.nn as nn @@ -32,6 +32,44 @@ def dec(*args, **kwargs): return dec +def _mcp_apply(x, bias, layer: ColumnParallelLinearWithLoRA): + """ + For `ColumnParallelLinearWithLoRA` or classes that inherit from + `ColumnParallelLinearWithLoRA`, they share the same `apply` logic. + """ + assert (layer.n_slices == len(layer.lora_a_stacked) == len( + layer.lora_b_stacked) == len(layer.output_slices)) + if layer.lora_bias_stacked is not None: + assert layer.n_slices == len(layer.lora_bias_stacked) + + output = layer.base_layer.quant_method.apply(layer.base_layer, x, bias) + + x = x.view(-1, x.shape[-1]) + output, out_orig_shape = output.view(-1, output.shape[-1]), output.shape + + # Since communication is needed, the buffer is directly initialized as a + # tensor rather than a tuple of tensor. + buffers = torch.zeros( + (layer.n_slices, x.shape[0], layer.lora_a_stacked[0].shape[2]), + dtype=torch.float32, + device=x.device, + ) + + layer.punica_wrapper.add_shrink(buffers, x, layer.lora_a_stacked, 1.0) + buffers = tensor_model_parallel_all_gather(buffers) + layer.punica_wrapper.add_expand(output, + buffers, + layer.lora_b_stacked, + layer.lora_bias_stacked, + layer.output_slices, + offset_start=0, + add_input=True) + + output = output.view(*out_orig_shape) + # now have column partitioned and packed output + return output + + # these layers are based on the tensor parallelism strategy given in # Y. Sheng et al., S-LoRA: Serving Thousands of Concurrent LoRA Adapters. 2023, # https://arxiv.org/abs/2311.03285. @@ -51,40 +89,15 @@ class ColumnParallelLinearWithShardedLoRA(ColumnParallelLinearWithLoRA): # gather operation. 
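The new _mcp_apply() above gives every column-parallel LoRA variant one code path: shrink each slice of the input into a shared fp32 buffer, all-gather the partial ranks across tensor-parallel workers, then expand back onto the base layer's output. A toy single-worker sketch of that dataflow (the real path uses fused Punica kernels, not plain matmuls):

import torch


def lora_apply_sketch(x, lora_a, lora_b, base_out):
    # x: (tokens, hidden); lora_a: (hidden, r_shard); lora_b: (r, out_shard)
    buf = x.float() @ lora_a.float()  # "shrink" to this rank's LoRA shard
    # Under TP > 1, an all-gather along the rank dimension happens here;
    # with a single worker the gathered buffer is just buf itself.
    gathered = buf
    return base_out + (gathered @ lora_b.float()).to(base_out.dtype)  # expand


x = torch.randn(8, 32)
out = lora_apply_sketch(x, torch.randn(32, 4), torch.randn(4, 16),
                        torch.zeros(8, 16))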
def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor: tp_rank = get_tensor_model_parallel_rank() - shard_size = self.lora_a_stacked.shape[2] + shard_size = self.lora_a_stacked[0].shape[2] start_idx = tp_rank * shard_size lora_a = lora_a[:, start_idx:start_idx + shard_size] return lora_a - def apply(self, x: torch.Tensor, - bias: Optional[torch.Tensor]) -> torch.Tensor: - output = self.base_layer.quant_method.apply(self.base_layer, x, bias) - - x = x.view(-1, x.shape[-1]) - output, out_orig_shape = output.view(-1, - output.shape[-1]), output.shape - buffer = torch.zeros( - (x.shape[0], self.lora_a_stacked.shape[2]), - dtype=torch.float32, - device=x.device, - ) - self.punica_wrapper.add_shrink(buffer, x, self.lora_a_stacked, 1.0) - buffer = tensor_model_parallel_all_gather(buffer) - self.punica_wrapper.add_expand(output, - buffer, - self.lora_b_stacked, - add_input=True) - # now have column partitioned output - - if self.bias_stacked is not None: - self.bias_stacked = self.bias_stacked.view( - -1, self.bias_stacked.shape[-1]) - self.bias_stacked = self.bias_stacked[ - self.punica_wrapper.token_lora_indices] - output += self.bias_stacked - - output = output.view(*out_orig_shape) - return output + def apply(self, + x: torch.Tensor, + bias: Optional[torch.Tensor] = None) -> torch.Tensor: + return _mcp_apply(x, bias, self) @classmethod @_fully_sharded_can_replace @@ -105,59 +118,6 @@ def can_replace_layer( ) -def _mcp_apply(x, bias, layer: QKVParallelLinearWithLora): - """ - MergedColumnParallelLinearWithShardedLoRA and - MergedQKVParallelLinearWithShardedLora share the same - LoRa weight application method. - - The main difference is the step by shard_size for lora_b which can - vary for MergedQKVParallelLinearWithShardedLora but is constant for - MergedColumnParallelLinearWithShardedLoRA. 
- """ - # expecting 2 for column parallel and 3 for qkv - n = len(layer.lora_a_stacked) - output = layer.base_layer.quant_method.apply(layer.base_layer, x, bias) - - x = x.view(-1, x.shape[-1]) - output, out_orig_shape = output.view(-1, output.shape[-1]), output.shape - buffers = torch.zeros( - (n, x.shape[0], layer.lora_a_stacked[0].shape[2]), - dtype=torch.float32, - device=x.device, - ) - for idx in range(n): - layer.punica_wrapper.add_shrink(buffers[idx], x, - layer.lora_a_stacked[idx], 1.0) - - buffers = tensor_model_parallel_all_gather(buffers) - left_offset = 0 - for idx in range(n): - shard_size = layer.lora_b_stacked[idx].shape[2] - - if layer.bias_stacked is not None: - bias = layer.bias_stacked[idx] - if bias is not None: - bias = bias.view(-1, bias.shape[-1]) - bias = bias[layer.punica_wrapper.token_lora_indices] - bias[layer.punica_wrapper.token_lora_indices == -1] = 0 - output[:, left_offset:left_offset + shard_size] += bias - - layer.punica_wrapper.add_expand_slice( - output, - buffers[idx], - layer.lora_b_stacked[idx], - left_offset, - shard_size, - add_input=True, - ) - left_offset += shard_size - - output = output.view(*out_orig_shape) - # now have column partitioned and packed output - return output - - class MergedColumnParallelLinearWithShardedLoRA( MergedColumnParallelLinearWithLoRA): """ @@ -181,8 +141,9 @@ def slice_lora_a( ] return lora_a - def apply(self, x: torch.Tensor, - bias: Optional[torch.Tensor]) -> torch.Tensor: + def apply(self, + x: torch.Tensor, + bias: Optional[torch.Tensor] = None) -> torch.Tensor: return _mcp_apply(x, bias, self) @classmethod @@ -214,30 +175,15 @@ class QKVParallelLinearWithShardedLora(QKVParallelLinearWithLora): def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor: tp_rank = get_tensor_model_parallel_rank() - shard_size = self.lora_a_stacked.shape[2] + shard_size = self.lora_a_stacked[0].shape[2] start_idx = tp_rank * shard_size lora_a = lora_a[:, start_idx:start_idx + shard_size] return lora_a - def apply(self, x: torch.Tensor, - bias: Optional[torch.Tensor]) -> torch.Tensor: - output = self.base_layer.quant_method.apply(self.base_layer, x, bias) - - x = x.view(-1, x.shape[-1]) - output, out_orig_shape = output.view(-1, - output.shape[-1]), output.shape - buffer = torch.zeros((x.shape[0], self.lora_a_stacked.shape[2]), - dtype=torch.float32, - device=x.device) - self.punica_wrapper.add_shrink(buffer, x, self.lora_a_stacked, 1.0) - buffer = tensor_model_parallel_all_gather(buffer) - self.punica_wrapper.add_expand(output, - buffer, - self.lora_b_stacked, - add_input=True) - # now have column partitioned output - output = output.view(*out_orig_shape) - return output + def apply(self, + x: torch.Tensor, + bias: Optional[torch.Tensor] = None) -> torch.Tensor: + return _mcp_apply(x, bias, self) @classmethod @_fully_sharded_can_replace @@ -278,8 +224,9 @@ def slice_lora_a( ] return lora_a - def apply(self, x: torch.Tensor, - bias: Optional[torch.Tensor]) -> torch.Tensor: + def apply(self, + x: torch.Tensor, + bias: Optional[torch.Tensor] = None) -> torch.Tensor: return _mcp_apply(x, bias, self) @classmethod @@ -312,7 +259,7 @@ class RowParallelLinearWithShardedLoRA(RowParallelLinearWithLoRA): """ def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor: - shard_size = self.lora_b_stacked.shape[2] + shard_size = self.lora_b_stacked[0].shape[2] start_idx = self.tp_rank * shard_size end_idx = (self.tp_rank + 1) * shard_size lora_b = lora_b[:, start_idx:end_idx] @@ -321,20 +268,24 @@ def slice_lora_b(self, lora_b: torch.Tensor) -> 
torch.Tensor: def slice_bias(self, bias: torch.Tensor) -> torch.Tensor: if bias is None: return bias - shard_size = self.bias_stacked.shape[2] + self.lora_bias_stacked = cast(Tuple[torch.Tensor, ...], + self.lora_bias_stacked) + shard_size = self.lora_bias_stacked[0].shape[2] start_idx = self.tp_rank * shard_size end_idx = (self.tp_rank + 1) * shard_size bias = bias[start_idx:end_idx] return bias - def apply(self, x: torch.Tensor) -> torch.Tensor: + def apply(self, + x: torch.Tensor, + bias: Optional[torch.Tensor] = None) -> torch.Tensor: output = self.base_layer.quant_method.apply(self.base_layer, x) x = x.view(-1, x.shape[-1]) output, out_orig_shape = output.view(-1, output.shape[-1]), output.shape buffer = torch.zeros( - (x.shape[0], self.lora_a_stacked.shape[2]), + (self.n_slices, x.shape[0], self.lora_a_stacked[0].shape[2]), dtype=torch.float32, device=x.device, ) @@ -348,18 +299,18 @@ def apply(self, x: torch.Tensor) -> torch.Tensor: # remains is a standard all_reduce. User should be aware though that # the output is not the same as a normal row_parallel, it should be # reduced before being used - shard_size = self.lora_b_stacked.shape[2] - start_idx = self.tp_rank * shard_size - - if self.bias_stacked is not None: - bias = self.bias_stacked.view(-1, self.bias_stacked.shape[-1]) - bias = bias[self.punica_wrapper.token_lora_indices] - bias[self.punica_wrapper.token_lora_indices == -1] = 0 - output += bias - - self.punica_wrapper.add_expand_slice(output, buffer, - self.lora_b_stacked, start_idx, - shard_size) + # NOTE offset are based on the rank. + shard_size = self.lora_b_stacked[0].shape[2] + offset_start = self.tp_rank * shard_size + self.punica_wrapper.add_expand( + output, + buffer, + self.lora_b_stacked, + self.lora_bias_stacked, + self.output_slices, + offset_start=offset_start, + add_input=True, + ) output = output.view(*out_orig_shape) return output diff --git a/vllm/lora/layers.py b/vllm/lora/layers.py index 3701988ff692f..a6c93a3d8bfe9 100644 --- a/vllm/lora/layers.py +++ b/vllm/lora/layers.py @@ -1,7 +1,7 @@ # pylint: disable=unused-argument import math from dataclasses import dataclass -from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union, cast import torch import torch.nn as nn @@ -17,20 +17,23 @@ tensor_model_parallel_all_reduce, tensor_model_parallel_gather) from vllm.distributed.utils import divide -from vllm.lora.punica import PunicaWrapper +# yapf: disable from vllm.model_executor.layers.linear import (ColumnParallelLinear, + LinearBase, MergedColumnParallelLinear, QKVParallelLinear, ReplicatedLinear, RowParallelLinear) +# yapf: enable from vllm.model_executor.layers.logits_processor import LogitsProcessor from vllm.model_executor.layers.rotary_embedding import ( LinearScalingRotaryEmbedding, RotaryEmbedding) from vllm.model_executor.layers.vocab_parallel_embedding import ( VocabParallelEmbedding) +from vllm.platforms import current_platform if TYPE_CHECKING: - pass + from vllm.lora.punica_wrapper import PunicaWrapperBase def _get_lora_device(base_layer: nn.Module) -> torch.device: @@ -67,63 +70,6 @@ def dec(*args, **kwargs): return dec -def apply_bias( - indices: torch.Tensor, - output: torch.Tensor, - bias_stacked: torch.Tensor, -): - """Applies bias to output - - Input shapes: - bias_stacked: (num_loras, output_dim) - indices: (batch_size) - output: (batch_size, output_dim) - """ - org_output = output - output = output.view(-1, output.shape[-1]) - indices = indices.view(-1) - - 
bias_stacked = bias_stacked.view(-1, bias_stacked.shape[-1]) - bias_stacked = bias_stacked[indices] - bias_stacked[indices == -1] = 0 - output += bias_stacked - - return output.view_as(org_output) - - -def apply_bias_packed_nslice( - indices: torch.Tensor, - output: torch.Tensor, - output_slices: Tuple[int, ...], - bias_stacked: Tuple[torch.Tensor, torch.Tensor, torch.Tensor], -): - """Applies bias to output - - Input shapes: - bias_stacked: 3 element tuple of (num_loras, output_dim) - indices: (batch_size) - output: (batch_size, q_slice_size + 2*kv_slice_size) - output_slices: n-1 element tuple of (slice_size...), - where n is number of slices - """ - org_output = output - output = output.view(-1, output.shape[-1]) - indices = indices.view(-1) - - offset_left = 0 - for slice_idx, slice in enumerate(output_slices): - bias = bias_stacked[slice_idx] - if bias is not None: - bias = bias.view(-1, bias.shape[-1]) - bias = bias[indices] - bias[indices == -1] = 0 - output[:, offset_left:offset_left + slice] += bias - - offset_left += slice - - return output.view_as(org_output) - - @dataclass class LoRAMapping(AdapterMapping): is_prefill: bool = False @@ -169,9 +115,9 @@ def set_lora( def set_mapping( self, - punica_wrapper: PunicaWrapper, + punica_wrapper, ): - self.punica_wrapper: PunicaWrapper = punica_wrapper + self.punica_wrapper: PunicaWrapperBase = punica_wrapper @classmethod def can_replace_layer( @@ -306,12 +252,10 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: full_lora_a_embeddings.shape[1], -1, ) - - # Embedding layer only need expand op - self.punica_wrapper.add_expand(full_output, - full_lora_a_embeddings, - self.lora_b_stacked, - add_input=True) + self.punica_wrapper.add_lora_embedding(full_output, + full_lora_a_embeddings, + self.lora_b_stacked, + add_input=True) return full_output.view_as(full_output_org) @classmethod @@ -325,14 +269,19 @@ def can_replace_layer( return type(source_layer) is VocabParallelEmbedding -class ReplicatedLinearWithLoRA(BaseLayerWithLoRA): +class BaseLinearLayerWithLoRA(BaseLayerWithLoRA): - def __init__(self, base_layer: ReplicatedLinear) -> None: + def __init__(self, base_layer: LinearBase): super().__init__() self.base_layer = base_layer self.input_size = self.base_layer.input_size - self.output_size = self.base_layer.output_size self.device = _get_lora_device(self.base_layer) + self.lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]] = None + + self.output_slices: Tuple[int, ...] 
+ self.tp_size: int + self.output_size: int + self.n_slices: int def create_lora_weights( self, @@ -341,39 +290,64 @@ def create_lora_weights( model_config: Optional[PretrainedConfig] = None, ) -> None: self.lora_config = lora_config - lora_a_output_size = lora_config.max_lora_rank - self.lora_a_stacked = torch.zeros( - max_loras, - 1, - lora_a_output_size, - self.input_size, - dtype=lora_config.lora_dtype, - device=self.device, - ) - self.lora_b_stacked = torch.zeros( - max_loras, - 1, - self.output_size, - lora_config.max_lora_rank, - dtype=lora_config.lora_dtype, - device=self.device, - ) - if lora_config.bias_enabled: - self.bias_stacked = torch.zeros( + # + if isinstance(self.base_layer, ReplicatedLinear): + lora_a_out_size = lora_config.max_lora_rank + lora_b_out_size = self.output_size + + elif isinstance(self.base_layer, ColumnParallelLinear): + lora_a_out_size = (lora_config.max_lora_rank if + not lora_config.fully_sharded_loras else divide( + lora_config.max_lora_rank, self.tp_size)) + lora_b_out_size = self.output_size + + elif isinstance(self.base_layer, RowParallelLinear): + lora_a_out_size = lora_config.max_lora_rank + lora_b_out_size = (self.output_size if + not lora_config.fully_sharded_loras else divide( + self.output_size, self.tp_size)) + else: + raise NotImplementedError + + self.lora_a_stacked = tuple( + torch.zeros( max_loras, 1, - self.output_size, + lora_a_out_size, + self.input_size, dtype=lora_config.lora_dtype, device=self.device, - ) - else: - self.bias_stacked = None + ) for _ in range(self.n_slices)) + self.lora_b_stacked = tuple( + torch.zeros( + max_loras, + 1, + lora_b_out_size, + lora_config.max_lora_rank, + dtype=lora_config.lora_dtype, + device=self.device, + ) for _ in range(self.n_slices)) + if lora_config.bias_enabled: + lora_bias_out_size = lora_b_out_size + self.lora_bias_stacked = tuple( + torch.zeros( + max_loras, + 1, + lora_bias_out_size, + dtype=lora_config.lora_dtype, + device=self.device, + ) for _ in range(self.n_slices)) + self.output_slices = (self.lora_b_stacked[0].shape[2], ) def reset_lora(self, index: int): - self.lora_a_stacked[index] = 0 - self.lora_b_stacked[index] = 0 - if self.lora_config.bias_enabled: - self.bias_stacked[index] = 0 + for s_index in range(self.n_slices): + self.lora_a_stacked[s_index][index] = 0 + self.lora_b_stacked[s_index][index] = 0 + if self.lora_config.bias_enabled: + # Make mypy happy + self.lora_bias_stacked = cast(Tuple[torch.Tensor, ...], + self.lora_bias_stacked) + self.lora_bias_stacked[s_index][index] = 0 def set_lora( self, @@ -381,35 +355,56 @@ def set_lora( lora_a: torch.Tensor, lora_b: torch.Tensor, embeddings_tensor: Optional[torch.Tensor], - bias: Optional[torch.Tensor] = None, + lora_bias: Optional[torch.Tensor] = None, ): - self.reset_lora(index) + # Except for QKVParallelLinearWithLora and + # MergedColumnParallelLinearWithLoRA, all other linear LoRA layers + # store weights in a tuple of size 1. These two layers will + # override this function. 
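The base set_lora() that follows copies a possibly lower-rank adapter into a fixed-size preallocated buffer, touching only the leading [rank, in_features] region; the zero padding is a no-op under matmul. The indexing is the subtle part, so here is that copy in isolation (shapes illustrative):

import torch

max_loras, max_rank, in_features = 4, 16, 32
stacked = torch.zeros(max_loras, 1, max_rank, in_features)

lora_a = torch.randn(in_features, 8)  # this adapter's rank is 8 < max_rank
index = 2
# Transpose to (rank, in_features) and fill only the live region; rows
# beyond the adapter's rank stay zero.
stacked[index, 0, :lora_a.shape[1], :lora_a.shape[0]].copy_(lora_a.T)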
+ assert (len(self.lora_a_stacked) == len(self.lora_b_stacked) == + self.n_slices == 1) - self.lora_a_stacked[index, - 0, :lora_a.shape[1], :lora_a.shape[0]].copy_( - lora_a.T, non_blocking=True) - self.lora_b_stacked[index, - 0, :lora_b.shape[1], :lora_b.shape[0]].copy_( - lora_b.T, non_blocking=True) - if bias is not None: - self.bias_stacked[index, - 0, :bias.shape[0]].copy_(bias.T, - non_blocking=True) - - def apply(self, x: torch.Tensor, - bias: Optional[torch.Tensor]) -> torch.Tensor: + self.reset_lora(index) + if self.tp_size > 1: + lora_a = self.slice_lora_a(lora_a) + lora_b = self.slice_lora_b(lora_b) + if lora_bias is not None: + lora_bias = self.slice_bias(lora_bias) + + self.lora_a_stacked[0][index, + 0, :lora_a.shape[1], :lora_a.shape[0]].copy_( + lora_a.T, non_blocking=True) + self.lora_b_stacked[0][index, + 0, :lora_b.shape[1], :lora_b.shape[0]].copy_( + lora_b.T, non_blocking=True) + if lora_bias is not None: + + self.lora_bias_stacked = cast(Tuple[torch.Tensor, ...], + self.lora_bias_stacked) + assert len(self.lora_bias_stacked) + self.lora_bias_stacked[0][index, 0, :lora_bias.shape[0]].copy_( + lora_bias.T, non_blocking=True) + + def apply(self, + x: torch.Tensor, + bias: Optional[torch.Tensor] = None) -> torch.Tensor: output = self.base_layer.quant_method.apply(self.base_layer, x, bias) - if self.bias_stacked is not None: - self.indices = self.punica_wrapper.token_lora_indices - output = apply_bias( - self.indices, - output, - self.bias_stacked, - ) - self.punica_wrapper.add_lora(output, x, self.lora_a_stacked, - self.lora_b_stacked, 1.0) + self.punica_wrapper.add_lora_linear(output, x, self.lora_a_stacked, + self.lora_b_stacked, + self.lora_bias_stacked, 1.0, + self.output_slices) return output + +class ReplicatedLinearWithLoRA(BaseLinearLayerWithLoRA): + + def __init__(self, base_layer: ReplicatedLinear) -> None: + super().__init__(base_layer, ) + # To ensure interface compatibility, set to 1 always. + self.tp_size = 1 + self.output_size = self.base_layer.output_size + self.n_slices = 1 + def forward(self, input_): """Forward of ReplicatedLinearWithLoRA @@ -442,73 +437,26 @@ def can_replace_layer( return type(source_layer) is ReplicatedLinear -class ColumnParallelLinearWithLoRA(BaseLayerWithLoRA): +class ColumnParallelLinearWithLoRA(BaseLinearLayerWithLoRA): """ LoRA on top of ColumnParallelLinear layer. - LoRA B is sliced for tensor parallelism. + There are two types for the `base_layer`: + 1. ColumnParallelLinear, e.g.`dense_h_to_4h` in `FalconForCausalLM`. + 2. MergedColumnParallelLinear, e.g.`gate_up_proj` in `Phi3ForCausalLM`. """ def __init__(self, base_layer: ColumnParallelLinear) -> None: - super().__init__() + super().__init__(base_layer) # The base_layer type is ColumnParallelLinear or # MergedColumnParallelLinear, their weight sharding logic is # inconsistent when TP is greater than 1. 
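In the slimmed-down ColumnParallelLinearWithLoRA below, LoRA A stays replicated and only LoRA B is sharded: each tensor-parallel rank keeps the columns of B that produce its slice of the output. The shard arithmetic in isolation (pure indexing, no vLLM imports):

import torch


def shard_lora_b(lora_b: torch.Tensor, tp_rank: int, tp_size: int):
    # lora_b: (rank, out_features); each worker owns out_features // tp_size
    shard = lora_b.shape[1] // tp_size
    return lora_b[:, tp_rank * shard:(tp_rank + 1) * shard]


assert shard_lora_b(torch.zeros(8, 64), tp_rank=1, tp_size=4).shape == (8, 16)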
self.is_merged_col_linear = type( base_layer) is MergedColumnParallelLinear - - self.base_layer = base_layer self.tp_size = get_tensor_model_parallel_world_size() - self.input_size = self.base_layer.input_size self.output_size = self.base_layer.output_size_per_partition - self.device = _get_lora_device(self.base_layer) - - def create_lora_weights( - self, - max_loras: int, - lora_config: LoRAConfig, - model_config: Optional[PretrainedConfig] = None, - ) -> None: - self.lora_config = lora_config - self.tp_size = get_tensor_model_parallel_world_size() - lora_a_output_size_per_partition = ( - lora_config.max_lora_rank if not lora_config.fully_sharded_loras - else divide(lora_config.max_lora_rank, self.tp_size)) - self.lora_a_stacked = torch.zeros( - max_loras, - 1, - lora_a_output_size_per_partition, - self.input_size, - dtype=lora_config.lora_dtype, - device=self.device, - ) - self.lora_b_stacked = torch.zeros( - max_loras, - 1, - self.output_size, - lora_config.max_lora_rank, - dtype=lora_config.lora_dtype, - device=self.device, - ) - - if lora_config.bias_enabled: - self.bias_stacked = torch.zeros( - max_loras, - 1, - self.output_size, - dtype=lora_config.lora_dtype, - device=self.device, - ) - else: - self.bias_stacked = None - - self.output_dim = self.lora_b_stacked.shape[2] - - def reset_lora(self, index: int): - self.lora_a_stacked[index] = 0 - self.lora_b_stacked[index] = 0 - if self.lora_config.bias_enabled: - self.bias_stacked[index] = 0 + # There is only one LoRA layer + self.n_slices = 1 def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor: return lora_a @@ -547,46 +495,6 @@ def slice_bias(self, bias: torch.Tensor) -> torch.Tensor: bias = bias[start_idx:end_idx] return bias - def set_lora( - self, - index: int, - lora_a: torch.Tensor, - lora_b: torch.Tensor, - embeddings_tensor: Optional[torch.Tensor], - bias: Optional[torch.Tensor] = None, - ): - self.reset_lora(index) - - if self.tp_size > 1: - lora_a = self.slice_lora_a(lora_a) - lora_b = self.slice_lora_b(lora_b) - bias = self.slice_bias(bias) - - self.lora_a_stacked[index, - 0, :lora_a.shape[1], :lora_a.shape[0]].copy_( - lora_a.T, non_blocking=True) - self.lora_b_stacked[index, - 0, :lora_b.shape[1], :lora_b.shape[0]].copy_( - lora_b.T, non_blocking=True) - if bias is not None: - self.bias_stacked[index, - 0, :bias.shape[0]].copy_(bias.T, - non_blocking=True) - - def apply(self, x: torch.Tensor, - bias: Optional[torch.Tensor]) -> torch.Tensor: - output = self.base_layer.quant_method.apply(self.base_layer, x, bias) - if self.bias_stacked is not None: - self.indices = self.punica_wrapper.token_lora_indices - output = apply_bias( - self.indices, - output, - self.bias_stacked, - ) - self.punica_wrapper.add_lora(output, x, self.lora_a_stacked, - self.lora_b_stacked, 1.0) - return output - def forward(self, input_): """Forward of ColumnParallelLinear @@ -634,8 +542,20 @@ class MergedColumnParallelLinearWithLoRA(ColumnParallelLinearWithLoRA): Both slices must have the same size. 
""" - def __init__(self, base_layer: MergedColumnParallelLinear) -> None: + def __init__( + self, base_layer: Union[MergedColumnParallelLinear, + QKVParallelLinear]) -> None: super().__init__(base_layer) + # There are two LoRA layers + self.tp_size = get_tensor_model_parallel_world_size() + self.tp_rank = get_tensor_model_parallel_rank() + # the output_sizes in MergedColumnParallelLinear is not sharded by tp + # we need to divide it by the tp_size to get correct slices size + output_sizes = self.base_layer.output_sizes + self.output_slices = tuple( + divide(output_size, self.tp_size) for output_size in output_sizes) + self.n_slices = len(self.output_slices) + self.output_ids = (self.tp_rank, ) * self.n_slices def create_lora_weights( self, @@ -643,16 +563,11 @@ def create_lora_weights( lora_config: LoRAConfig, model_config: Optional[PretrainedConfig] = None, ) -> None: + """ + The main reason for overriding this function is to enhance code + maintainability. + """ self.lora_config = lora_config - n_slices = 2 - if not (len(self.base_layer.output_sizes) == n_slices - and self.base_layer.output_sizes[0] - == self.base_layer.output_sizes[1]): - raise ValueError( - "LoRAColumnParallelLinear2Slice requires 2 slices with " - "the same size.") - self.tp_size = get_tensor_model_parallel_world_size() - self.tp_rank = get_tensor_model_parallel_rank() lora_a_output_size_per_partition = ( lora_config.max_lora_rank if not lora_config.fully_sharded_loras @@ -666,38 +581,25 @@ def create_lora_weights( self.input_size, dtype=lora_config.lora_dtype, device=self.device, - ) for _ in range(n_slices)) + ) for _ in range(self.n_slices)) self.lora_b_stacked = tuple( torch.zeros( max_loras, 1, - self.output_size // 2, + output_size, lora_config.max_lora_rank, dtype=lora_config.lora_dtype, device=self.device, - ) for _ in range(n_slices)) + ) for output_size in self.output_slices) if lora_config.bias_enabled: - self.bias_stacked = tuple( + self.lora_bias_stacked = tuple( torch.zeros( max_loras, 1, - self.output_size // 2, + output_size, dtype=lora_config.lora_dtype, device=self.device, - ) for _ in range(n_slices)) - else: - self.bias_stacked = None - - self.output_dim = self.lora_b_stacked[0].shape[2] - - def reset_lora(self, index: int): - self.lora_a_stacked[0][index] = 0 - self.lora_a_stacked[1][index] = 0 - self.lora_b_stacked[0][index] = 0 - self.lora_b_stacked[1][index] = 0 - if self.lora_config.bias_enabled: - self.bias_stacked[0][index] = 0 - self.bias_stacked[1][index] = 0 + ) for output_size in self.output_slices) def slice_lora_a( self, lora_a: List[Union[torch.Tensor, None]] @@ -707,27 +609,21 @@ def slice_lora_a( def slice_lora_b( self, lora_b: List[Union[torch.Tensor, None]] ) -> List[Union[torch.Tensor, None]]: - #NOTE: lora_b contains 2 subloras, and each sublora could be None. - shard_size = self.output_dim - start_idx = self.tp_rank * shard_size - end_idx = (self.tp_rank + 1) * shard_size - lora_b = [ - lora_b[0][:, start_idx:end_idx] if lora_b[0] is not None else None, - lora_b[1][:, start_idx:end_idx] if lora_b[1] is not None else None, - ] + for i, (shard_id, shard_size) in enumerate( + zip(self.output_ids, self.output_slices)): + if (lora_b_i := lora_b[i]) is not None: + lora_b[i] = lora_b_i[:, shard_size * shard_id:shard_size * + (shard_id + 1)] return lora_b def slice_bias( self, bias: List[Union[torch.Tensor, None]]) -> List[Union[torch.Tensor, None]]: - # NOTE : each bias could be None. 
- shard_size = self.output_dim - start_idx = self.tp_rank * shard_size - end_idx = (self.tp_rank + 1) * shard_size - bias = [ - bias[0][start_idx:end_idx] if bias[0] is not None else None, - bias[1][start_idx:end_idx] if bias[1] is not None else None - ] + for i, (shard_id, shard_size) in enumerate( + zip(self.output_ids, self.output_slices)): + if (bias_i := bias[i]) is not None: + bias[i] = bias_i[shard_size * shard_id:shard_size * + (shard_id + 1)] return bias def set_lora( @@ -736,54 +632,35 @@ def set_lora( lora_a: torch.Tensor, lora_b: torch.Tensor, embeddings_tensor: Optional[torch.Tensor], - bias: Optional[torch.Tensor] = None, + lora_bias: Optional[torch.Tensor] = None, ): self.reset_lora(index) if self.tp_size > 1: lora_a = self.slice_lora_a(lora_a) lora_b = self.slice_lora_b(lora_b) - if bias is not None: - bias = self.slice_bias(bias) - - if lora_a[0] is not None: - self.lora_a_stacked[0][ - index, 0, :lora_a[0].shape[1], :lora_a[0].shape[0]].copy_( - lora_a[0].T, non_blocking=True) - self.lora_b_stacked[0][ - index, 0, :lora_b[0].shape[1], :lora_b[0].shape[0]].copy_( - lora_b[0].T, non_blocking=True) - if bias is not None and bias[0] is not None: - self.bias_stacked[0][index, - 0, :bias[0].shape[0]].copy_(bias[0].T, - non_blocking=True) - if lora_a[1] is not None: - self.lora_a_stacked[1][ - index, 0, :lora_a[1].shape[1], :lora_a[1].shape[0]].copy_( - lora_a[1].T, non_blocking=True) - self.lora_b_stacked[1][ - index, 0, :lora_b[1].shape[1], :lora_b[1].shape[0]].copy_( - lora_b[1].T, non_blocking=True) - if bias is not None and bias[1] is not None: - self.bias_stacked[1][index, - 0, :bias[1].shape[0]].copy_(bias[1].T, - non_blocking=True) - - def apply(self, x: torch.Tensor, - bias: Optional[torch.Tensor]) -> torch.Tensor: - output = self.base_layer.quant_method.apply(self.base_layer, x, bias) - if self.bias_stacked is not None: - self.indices = self.punica_wrapper.token_lora_indices - output = apply_bias_packed_nslice( - self.indices, - output, - (self.output_dim, self.output_dim), - self.bias_stacked, - ) - self.punica_wrapper.add_lora_packed_nslice( - output, x, self.lora_a_stacked, self.lora_b_stacked, 1.0, - (self.output_dim, self.output_dim)) - return output + if lora_bias is not None: + lora_bias = self.slice_bias(lora_bias) + + for i in range(self.n_slices): + if (lora_a_i := lora_a[i]) is not None: + self.lora_a_stacked[i][ + index, 0, :lora_a_i.shape[1], :lora_a_i.shape[0]].copy_( + lora_a_i.T, non_blocking=True) + if (lora_b_i := lora_b[i]) is not None: + self.lora_b_stacked[i][ + index, 0, :lora_b_i.shape[1], :lora_b_i.shape[0]].copy_( + lora_b_i.T, non_blocking=True) + + if lora_bias is not None: + self.lora_bias_stacked = cast(Tuple[torch.Tensor, ...], + self.lora_bias_stacked) + for i in range(self.n_slices): + if (lora_bias_i := lora_bias[i]) is not None: + self.lora_bias_stacked[i][index, + 0, :lora_bias_i.shape[0]].copy_( + lora_bias_i.T, + non_blocking=True) @classmethod @_not_fully_sharded_can_replace @@ -813,7 +690,6 @@ class QKVParallelLinearWithLora(ColumnParallelLinearWithLoRA): def __init__(self, base_layer: QKVParallelLinear) -> None: super().__init__(base_layer) - self.tp_size = get_tensor_model_parallel_world_size() self.q_proj_total_size = (self.base_layer.total_num_heads * self.base_layer.head_size) self.q_proj_shard_size = (self.base_layer.num_heads * @@ -822,6 +698,8 @@ def __init__(self, base_layer: QKVParallelLinear) -> None: self.base_layer.head_size) self.kv_proj_total_size = (self.base_layer.total_num_kv_heads * self.base_layer.head_size) + 
# There is only one LoRA layer
+        self.n_slices = 1

     def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor:
         tp_rank = get_tensor_model_parallel_rank()
@@ -856,32 +734,6 @@ def slice_bias(self, bias: torch.Tensor) -> torch.Tensor:
         bias = torch.cat([bias_q, bias_k, bias_v], dim=1)
         return bias

-    def set_lora(
-        self,
-        index: int,
-        lora_a: torch.Tensor,
-        lora_b: torch.Tensor,
-        embeddings_tensor: Optional[torch.Tensor],
-        bias: Optional[torch.Tensor] = None,
-    ):
-        self.reset_lora(index)
-        if self.tp_size > 1:
-            lora_a = self.slice_lora_a(lora_a)
-            lora_b = self.slice_lora_b(lora_b)
-            if bias is not None:
-                bias = self.slice_bias(bias)
-
-        self.lora_a_stacked[index,
-                            0, :lora_a.shape[1], :lora_a.shape[0]].copy_(
-                                lora_a.T, non_blocking=True)
-        self.lora_b_stacked[index,
-                            0, :lora_b.shape[1], :lora_b.shape[0]].copy_(
-                                lora_b.T, non_blocking=True)
-        if bias is not None:
-            self.bias_stacked[index,
-                              0, :bias.shape[0]].copy_(bias.T,
-                                                       non_blocking=True)
-
     @classmethod
     @_not_fully_sharded_can_replace
     def can_replace_layer(cls, source_layer: nn.Module,
@@ -891,8 +743,8 @@ def can_replace_layer(cls, source_layer: nn.Module,
                 packed_modules_list) == 1


-class MergedQKVParallelLinearWithLora(ColumnParallelLinearWithLoRA):
-    """ColumnParallelLinear layer that is composed of 3 sublayers (slices)
+class MergedQKVParallelLinearWithLora(MergedColumnParallelLinearWithLoRA):
+    """MergedColumnParallelLinear layer that is composed of 3 sublayers (slices)
     packed together in qkv proj fashion
     (q_proj + k_proj + v_proj -> qkv_proj).
@@ -904,16 +756,11 @@ class MergedQKVParallelLinearWithLora(ColumnParallelLinearWithLoRA):

     def __init__(self, base_layer: QKVParallelLinear) -> None:
         super().__init__(base_layer)
-
-    def create_lora_weights(
-        self,
-        max_loras: int,
-        lora_config: LoRAConfig,
-        model_config: Optional[PretrainedConfig] = None,
-    ) -> None:
-        self.lora_config = lora_config
+        # There are three LoRA layers.
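For QKV projections the q shard tracks the tensor-parallel rank directly, while the k/v shard index repeats across ranks that replicate the same KV heads (grouped-query attention). A sketch of the shard-id arithmetic the constructor below relies on:

def qkv_shard_ids(tp_rank: int, num_kv_head_replicas: int):
    q_shard_id = tp_rank
    # With GQA, num_kv_head_replicas ranks share one KV head group, so
    # the kv shard index advances once per replica group.
    kv_shard_id = tp_rank // num_kv_head_replicas
    return q_shard_id, kv_shard_id, kv_shard_id


assert qkv_shard_ids(tp_rank=5, num_kv_head_replicas=4) == (5, 1, 1)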
+ self.n_slices = len(self.base_layer.output_sizes) self.tp_size = get_tensor_model_parallel_world_size() self.tp_rank = get_tensor_model_parallel_rank() + self.q_proj_shard_size = (self.base_layer.num_heads * self.base_layer.head_size) self.kv_proj_shard_size = (self.base_layer.num_kv_heads * @@ -921,227 +768,28 @@ def create_lora_weights( self.q_shard_id = self.tp_rank self.kv_shard_id = self.tp_rank // self.base_layer.num_kv_head_replicas - lora_a_output_size_per_partition = ( - lora_config.max_lora_rank if not lora_config.fully_sharded_loras - else divide(lora_config.max_lora_rank, self.tp_size)) - # q, k, v - self.lora_a_stacked = ( - torch.zeros( - max_loras, - 1, - lora_a_output_size_per_partition, - self.input_size, - dtype=lora_config.lora_dtype, - device=self.device, - ), - torch.zeros( - max_loras, - 1, - lora_a_output_size_per_partition, - self.input_size, - dtype=lora_config.lora_dtype, - device=self.device, - ), - torch.zeros( - max_loras, - 1, - lora_a_output_size_per_partition, - self.input_size, - dtype=lora_config.lora_dtype, - device=self.device, - ), - ) - self.lora_b_stacked = ( - torch.zeros( - max_loras, - 1, - self.q_proj_shard_size, - lora_config.max_lora_rank, - dtype=lora_config.lora_dtype, - device=self.device, - ), - torch.zeros( - max_loras, - 1, - self.kv_proj_shard_size, - lora_config.max_lora_rank, - dtype=lora_config.lora_dtype, - device=self.device, - ), - torch.zeros( - max_loras, - 1, - self.kv_proj_shard_size, - lora_config.max_lora_rank, - dtype=lora_config.lora_dtype, - device=self.device, - ), - ) - if lora_config.bias_enabled: - self.bias_stacked = ( - torch.zeros( - max_loras, - 1, - self.q_proj_shard_size, - dtype=lora_config.lora_dtype, - device=self.device, - ), - torch.zeros( - max_loras, - 1, - self.kv_proj_shard_size, - dtype=lora_config.lora_dtype, - device=self.device, - ), - torch.zeros( - max_loras, - 1, - self.kv_proj_shard_size, - dtype=lora_config.lora_dtype, - device=self.device, - ), - ) - else: - self.bias_stacked = None - self.output_slices = ( self.q_proj_shard_size, self.kv_proj_shard_size, self.kv_proj_shard_size, ) - self.packed_indices: Optional[torch.Tensor] = None - self.standard_indices: Optional[torch.Tensor] = None - # lazily initialized. 
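
# Illustrative sketch (editorial note, not part of the patch): how the
# `output_ids` and `output_slices` tuples set in __init__ above let a single
# loop slice q/k/v biases per tensor-parallel rank, replacing the deleted
# per-slice code below. All sizes are assumed for demonstration.
import torch

tp_rank, num_kv_head_replicas = 1, 2
q_shard_id = tp_rank
kv_shard_id = tp_rank // num_kv_head_replicas
output_slices = (8, 4, 4)                     # q, k, v shard sizes
output_ids = (q_shard_id, kv_shard_id, kv_shard_id)
bias = [torch.arange(16.), torch.arange(8.), torch.arange(8.)]
for i, (shard_id, shard_size) in enumerate(zip(output_ids, output_slices)):
    if (bias_i := bias[i]) is not None:
        bias[i] = bias_i[shard_size * shard_id:shard_size * (shard_id + 1)]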
- self.indices: torch.Tensor - self.indices_len: List[int] - - def reset_lora(self, index: int): - self.lora_a_stacked[0][index] = 0 - self.lora_b_stacked[0][index] = 0 - self.lora_a_stacked[1][index] = 0 - self.lora_b_stacked[1][index] = 0 - self.lora_a_stacked[2][index] = 0 - self.lora_b_stacked[2][index] = 0 - if self.lora_config.bias_enabled: - self.bias_stacked[0][index] = 0 - self.bias_stacked[1][index] = 0 - self.bias_stacked[2][index] = 0 - - def slice_lora_a( - self, lora_a: List[Union[torch.Tensor, None]] - ) -> List[Union[torch.Tensor, None]]: - return lora_a - - def slice_lora_b( - self, lora_b: List[Union[torch.Tensor, None]] - ) -> List[Union[torch.Tensor, None]]: - lora_b_q, lora_b_k, lora_b_v = None, None, None - if lora_b[0] is not None: - lora_b_q = lora_b[0][:, self.q_proj_shard_size * - self.q_shard_id:self.q_proj_shard_size * - (self.q_shard_id + 1), ] - if lora_b[1] is not None: - lora_b_k = lora_b[1][:, self.kv_proj_shard_size * - self.kv_shard_id:self.kv_proj_shard_size * - (self.kv_shard_id + 1), ] - if lora_b[2] is not None: - lora_b_v = lora_b[2][:, self.kv_proj_shard_size * - self.kv_shard_id:self.kv_proj_shard_size * - (self.kv_shard_id + 1), ] - lora_b = [lora_b_q, lora_b_k, lora_b_v] - return lora_b - - def slice_bias( - self, bias: List[Union[torch.Tensor, - None]]) -> List[Union[torch.Tensor, None]]: - bias_q, bias_k, bias_v = bias - if bias_q is not None: - bias_q = bias_q[self.q_proj_shard_size * - self.q_shard_id:self.q_proj_shard_size * - (self.q_shard_id + 1)] - if bias_k is not None: - bias_k = bias_k[self.kv_proj_shard_size * - self.kv_shard_id:self.kv_proj_shard_size * - (self.kv_shard_id + 1)] - if bias_v is not None: - bias_v = bias_v[self.kv_proj_shard_size * - self.kv_shard_id:self.kv_proj_shard_size * - (self.kv_shard_id + 1)] - bias = [bias_q, bias_k, bias_v] - return bias + self.output_ids = ( + self.q_shard_id, + self.kv_shard_id, + self.kv_shard_id, + ) - def set_lora( + def create_lora_weights( self, - index: int, - lora_a: torch.Tensor, - lora_b: torch.Tensor, - embeddings_tensor: Optional[torch.Tensor], - bias: Optional[torch.Tensor] = None, - ): - self.reset_lora(index) - - if self.tp_size > 1: - lora_a = self.slice_lora_a(lora_a) - lora_b = self.slice_lora_b(lora_b) - if bias is not None: - bias = self.slice_bias(bias) - - if lora_b[0] is not None: - lora_b_q = lora_b[0] - self.lora_b_stacked[0][ - index, 0, :lora_b_q.shape[1], :lora_b_q.shape[0]].copy_( - lora_b_q.T, non_blocking=True) - if lora_b[1] is not None: - lora_b_k = lora_b[1] - self.lora_b_stacked[1][ - index, 0, :lora_b_k.shape[1], :lora_b_k.shape[0]].copy_( - lora_b_k.T, non_blocking=True) - if lora_b[2] is not None: - lora_b_v = lora_b[2] - self.lora_b_stacked[2][ - index, 0, :lora_b_v.shape[1], :lora_b_v.shape[0]].copy_( - lora_b_v.T, non_blocking=True) - - if lora_a[0] is not None: - self.lora_a_stacked[0][ - index, 0, :lora_a[0].shape[1], :lora_a[0].shape[0]].copy_( - lora_a[0].T, non_blocking=True) - if lora_a[1] is not None: - self.lora_a_stacked[1][ - index, 0, :lora_a[1].shape[1], :lora_a[1].shape[0]].copy_( - lora_a[1].T, non_blocking=True) - if lora_a[2] is not None: - self.lora_a_stacked[2][ - index, 0, :lora_a[2].shape[1], :lora_a[2].shape[0]].copy_( - lora_a[2].T, non_blocking=True) - - if bias is not None: - if bias[0] is not None: - self.bias_stacked[0][index, 0, :bias[0].shape[0]].copy_( - bias[0].T, non_blocking=True) - if bias[1] is not None: - self.bias_stacked[1][index, 0, :bias[1].shape[0]].copy_( - bias[1].T, non_blocking=True) - if bias[2] is not 
None: - self.bias_stacked[2][index, 0, :bias[2].shape[0]].copy_( - bias[2].T, non_blocking=True) - - def apply(self, x: torch.Tensor, - bias: Optional[torch.Tensor]) -> torch.Tensor: - output = self.base_layer.quant_method.apply(self.base_layer, x, bias) - if self.bias_stacked is not None: - self.indices = self.punica_wrapper.token_lora_indices - output = apply_bias_packed_nslice( - self.indices, - output, - self.output_slices, - self.bias_stacked, - ) - self.punica_wrapper.add_lora_packed_nslice(output, x, - self.lora_a_stacked, - self.lora_b_stacked, 1.0, - self.output_slices) - return output + max_loras: int, + lora_config: LoRAConfig, + model_config: Optional[PretrainedConfig] = None, + ) -> None: + """ + The main reason for overloading this function is to handle inconsistent + weight dimensions in qkv lora. + """ + super().create_lora_weights(max_loras, lora_config, model_config) @classmethod @_not_fully_sharded_can_replace @@ -1156,76 +804,25 @@ def can_replace_layer( and len(packed_modules_list) == 3) -class RowParallelLinearWithLoRA(BaseLayerWithLoRA): +class RowParallelLinearWithLoRA(BaseLinearLayerWithLoRA): def __init__(self, base_layer: RowParallelLinear) -> None: - super().__init__() - self.base_layer = base_layer + super().__init__(base_layer) + + self.tp_size = get_tensor_model_parallel_world_size() + # reset input_size self.input_size = self.base_layer.input_size_per_partition self.output_size = self.base_layer.output_size - self.device = _get_lora_device(self.base_layer) - def create_lora_weights( - self, - max_loras: int, - lora_config: LoRAConfig, - model_config: Optional[PretrainedConfig] = None, - ) -> None: - self.lora_config = lora_config self.tp_rank = get_tensor_model_parallel_rank() - self.lora_a_stacked = torch.zeros( - ( - max_loras, - 1, - lora_config.max_lora_rank, - self.input_size, - ), - dtype=lora_config.lora_dtype, - device=self.device, - ) - tp_size = get_tensor_model_parallel_world_size() - lora_b_output_size_per_partition = ( - self.output_size if not lora_config.fully_sharded_loras else - divide(self.output_size, tp_size)) - - self.lora_b_stacked = torch.zeros( - ( - max_loras, - 1, - lora_b_output_size_per_partition, - lora_config.max_lora_rank, - ), - dtype=lora_config.lora_dtype, - device=self.device, - ) - - if lora_config.bias_enabled: - self.bias_stacked = torch.zeros( - ( - max_loras, - 1, - self.output_size, - ), - dtype=lora_config.lora_dtype, - device=self.device, - ) - else: - self.bias_stacked = None - # Lazily initialized - self.indices: torch.Tensor - self.indices_len: List[int] - - def reset_lora(self, index: int): - self.lora_a_stacked[index] = 0 - self.lora_b_stacked[index] = 0 - if self.lora_config.bias_enabled: - self.bias_stacked[index] = 0 + # There is only one LoRA layer. 
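
# Illustrative sketch (editorial note, not part of the patch):
# RowParallelLinear shards the *input* dimension, so only lora_a needs
# slicing (slice_lora_b below is the identity). Shapes are assumed.
import torch

tp_rank, input_size_per_partition = 1, 128
lora_a = torch.randn(512, 16)                 # (total_input_size, rank)
start_idx = tp_rank * input_size_per_partition
lora_a_shard = lora_a[start_idx:start_idx + input_size_per_partition, :]
assert lora_a_shard.shape == (128, 16)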
+ self.n_slices = 1 def slice_lora_a(self, lora_a: torch.Tensor) -> torch.Tensor: - tensor_model_parallel_rank = get_tensor_model_parallel_rank() + shard_size = self.input_size - start_idx = tensor_model_parallel_rank * shard_size - end_idx = (tensor_model_parallel_rank + 1) * shard_size + start_idx = self.tp_rank * shard_size + end_idx = (self.tp_rank + 1) * shard_size lora_a = lora_a[start_idx:end_idx, :] return lora_a @@ -1235,46 +832,6 @@ def slice_lora_b(self, lora_b: torch.Tensor) -> torch.Tensor: def slice_bias(self, bias: torch.Tensor) -> torch.Tensor: return bias - def set_lora( - self, - index: int, - lora_a: torch.Tensor, - lora_b: torch.Tensor, - embeddings_tensor: Optional[torch.Tensor], - bias: Optional[torch.Tensor] = None, - ): - self.reset_lora(index) - - if self.base_layer.tp_size > 1: - lora_a = self.slice_lora_a(lora_a) - lora_b = self.slice_lora_b(lora_b) - if bias is not None: - bias = self.slice_bias(bias) - - self.lora_a_stacked[index, - 0, :lora_a.shape[1], :lora_a.shape[0]].copy_( - lora_a.T, non_blocking=True) - self.lora_b_stacked[index, - 0, :lora_b.shape[1], :lora_b.shape[0]].copy_( - lora_b.T, non_blocking=True) - if bias is not None: - self.bias_stacked[index, - 0, :bias.shape[0]].copy_(bias.T, - non_blocking=True) - - def apply(self, x: torch.Tensor) -> torch.Tensor: - output = self.base_layer.quant_method.apply(self.base_layer, x) - if self.bias_stacked is not None: - self.indices = self.punica_wrapper.token_lora_indices - output = apply_bias( - self.indices, - output, - self.bias_stacked, - ) - self.punica_wrapper.add_lora(output, x, self.lora_a_stacked, - self.lora_b_stacked, 1.0) - return output - def forward(self, input_): """Forward of RowParallelLinear @@ -1292,10 +849,9 @@ def forward(self, input_): input_parallel = input_ else: # TODO: simplify code below - tp_rank = get_tensor_model_parallel_rank() splitted_input = split_tensor_along_last_dim( input_, num_partitions=self.base_layer.tp_size) - input_parallel = splitted_input[tp_rank].contiguous() + input_parallel = splitted_input[self.tp_rank].contiguous() # Matrix multiply. output_parallel = self.apply(input_parallel) @@ -1513,6 +1069,11 @@ def _get_logits( ).index_select(0, indices_padded).nan_to_num_(nan=float("-inf"), posinf=float("inf"), neginf=float("-inf"))) + + # HPU needs special handling to prune out dummy samples. 
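
# Illustrative sketch (editorial note, not part of the patch): why the HPU
# branch below truncates. HPU pads batches with dummy samples, so the LoRA
# logits can have more rows than the real logits; slicing to logits.shape[0]
# drops the padding. Shapes are assumed for demonstration.
import torch

logits = torch.randn(3, 100)                  # 3 real samples
lora_logits = torch.randn(5, 10)              # padded to 5 rows
lora_logits = lora_logits[:logits.shape[0], :]
assert lora_logits.shape[0] == logits.shape[0]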
+ if current_platform.is_hpu(): + lora_logits = lora_logits[:logits.shape[0], :] + logits[:, self.base_layer.org_vocab_size:self.base_layer.org_vocab_size + lora_logits.shape[1]] = lora_logits diff --git a/vllm/lora/lora.py b/vllm/lora/lora.py index b648312ba76ec..dde347b78bf81 100644 --- a/vllm/lora/lora.py +++ b/vllm/lora/lora.py @@ -4,6 +4,7 @@ import torch import torch.types +from vllm.lora.peft_helper import PEFTHelper from vllm.utils import is_pin_memory_available @@ -59,6 +60,23 @@ def extra_vocab_size(self) -> int: return self.embeddings_tensor.shape[ 0] if self.embeddings_tensor is not None else 0 + @classmethod + def from_config( + cls, + module_name: str, + peft_helper: PEFTHelper, + embeddings_tensor: Optional[torch.Tensor] = None, + ) -> "LoRALayerWeights": + return cls( + module_name, + peft_helper.r, + peft_helper.lora_alpha, + None, + None, + None, + embeddings_tensor, + ) + @classmethod def create_dummy_lora_weights( cls, diff --git a/vllm/lora/models.py b/vllm/lora/models.py index 2ffefe61427e3..70806a77b9fff 100644 --- a/vllm/lora/models.py +++ b/vllm/lora/models.py @@ -21,7 +21,8 @@ LinearScalingRotaryEmbeddingWithLora, LoRAMapping) from vllm.lora.lora import LoRALayerWeights, PackedLoRALayerWeights -from vllm.lora.punica import PunicaWrapper +from vllm.lora.peft_helper import PEFTHelper +from vllm.lora.punica_wrapper import get_punica_wrapper from vllm.lora.utils import (from_layer, from_layer_logits_processor, is_regex_target_modules, parse_fine_tuned_lora_name, replace_submodule) @@ -104,14 +105,12 @@ def get_lora(self, module_name: str) -> Optional[LoRALayerWeights]: def from_lora_tensors( cls, lora_model_id: int, - rank: int, - lora_alpha: int, tensors: Dict[str, torch.Tensor], + peft_helper: PEFTHelper, device: str = "cuda", dtype: Optional[torch.dtype] = None, embeddings: Optional[Dict[str, torch.Tensor]] = None, target_embedding_padding: Optional[int] = None, - scaling_factor: Optional[float] = None, embedding_modules: Optional[Dict[str, str]] = None, embedding_padding_modules: Optional[List[str]] = None, ) -> "LoRAModel": @@ -135,10 +134,9 @@ def from_lora_tensors( if pin_memory: lora_embeddings_tensor = ( lora_embeddings_tensor.pin_memory()) - loras[module_name] = LoRALayerWeights(module_name, rank, - lora_alpha, None, None, - None, - lora_embeddings_tensor) + loras[module_name] = LoRALayerWeights.from_config( + module_name, peft_helper, lora_embeddings_tensor) + if is_bias: loras[module_name].bias = tensor.to(device=device, dtype=dtype).t() @@ -170,7 +168,11 @@ def from_lora_tensors( for lora in loras.values(): lora.optimize() - return cls(lora_model_id, rank, loras, scaling_factor=scaling_factor) + + return cls(lora_model_id, + peft_helper.r, + loras, + scaling_factor=peft_helper.vllm_scaling_factor) @classmethod def from_local_checkpoint( @@ -212,6 +214,9 @@ def from_local_checkpoint( "new_embeddings.bin") with open(lora_config_path) as f: config = json.load(f) + + config["vllm_max_position_embeddings"] = max_position_embeddings + peft_helper = PEFTHelper.from_dict(config) if os.path.isfile(lora_tensor_path): tensors: Dict[str, torch.Tensor] = {} # Find unexpected modules. @@ -242,7 +247,7 @@ def from_local_checkpoint( # When a bin file is provided, we rely on config to find unexpected # modules. 
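
# Illustrative sketch (editorial note, not part of the patch): how the new
# PEFTHelper is driven from an adapter_config.json dict along the loading
# path above; the config values here are made up.
from vllm.lora.peft_helper import PEFTHelper

config = {
    "r": 16,
    "lora_alpha": 32,
    "target_modules": ["q_proj", "k_proj", "v_proj"],
    "context_length": 32768,
}
config["vllm_max_position_embeddings"] = 4096
peft_helper = PEFTHelper.from_dict(config)
# __post_init__ derives the long-context scaling factor:
assert peft_helper.vllm_scaling_factor == 8.0  # ceil(32768 / 4096)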
unexpected_modules = [] - target_modules = config["target_modules"] + target_modules = peft_helper.target_modules if not isinstance(target_modules, list): target_modules = [target_modules] for module in target_modules: @@ -256,7 +261,7 @@ def from_local_checkpoint( # https://github.com/vllm-project/vllm/pull/5909. But there's no # other better mechanism. if unexpected_modules and not is_regex_target_modules( - config["target_modules"], expected_lora_modules): + peft_helper.target_modules, expected_lora_modules): raise ValueError( f"While loading {lora_dir}, expected" f" target modules in {expected_lora_modules}" @@ -274,30 +279,17 @@ def from_local_checkpoint( embeddings = torch.load(new_embeddings_bin_file_path, map_location=device) - rank = config["r"] - lora_alpha = config["lora_alpha"] - context_length = config.get("context_length", None) - scaling_factor = None - if context_length: - if max_position_embeddings is None: - max_position_embeddings = context_length - scaling_factor = float( - math.ceil(context_length / max_position_embeddings)) - return cls.from_lora_tensors( lora_model_id=get_lora_id() if lora_model_id is None else lora_model_id, - rank=rank, - lora_alpha=lora_alpha, tensors=tensors, + peft_helper=peft_helper, device=device, dtype=dtype, embeddings=embeddings, target_embedding_padding=target_embedding_padding, - scaling_factor=scaling_factor, embedding_modules=embedding_modules, - embedding_padding_modules=embedding_padding_modules, - ) + embedding_padding_modules=embedding_padding_modules) class LoRAModelManager(AdapterModelManager): @@ -331,9 +323,9 @@ def __init__( self.lora_index_to_id: List[Optional[int]] = [None] * self.lora_slots self.vocab_size = vocab_size self.long_lora_context: Optional[LongContextLoRAContext] = None - self.punica_wrapper = PunicaWrapper(max_num_batched_tokens, - max_batches=self.max_num_seqs, - device=self.device) + self.punica_wrapper = get_punica_wrapper(max_num_batched_tokens, + max_batches=self.max_num_seqs, + device=self.device) # Scaling factor -> offset to the sin_cos_cache to it. # Used for long context lora. 
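
# Illustrative sketch (editorial note, not part of the patch): the likely
# shape of the platform dispatch behind get_punica_wrapper. The real logic
# lives in punica_selector, which this diff does not show, so this is an
# assumption based on the GPU/HPU wrappers introduced below.
from vllm.platforms import current_platform

def get_punica_wrapper_sketch(max_num_batched_tokens, max_batches, device):
    if current_platform.is_hpu():
        from vllm.lora.punica_wrapper.punica_hpu import PunicaWrapperHPU
        return PunicaWrapperHPU(max_num_batched_tokens, max_batches, device)
    from vllm.lora.punica_wrapper.punica_gpu import PunicaWrapperGPU
    return PunicaWrapperGPU(max_num_batched_tokens, max_batches, device)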
self.scaling_factor_to_offset: Dict[float, int] = {}
@@ -555,17 +547,17 @@ def create_dummy_lora(
                     input_dim,
                     output_dim,
                     rank,
-                    module.lora_a_stacked.dtype,
+                    module.lora_a_stacked[0].dtype,
                     "cpu",
                     embeddings_tensor_dim=embeddings_tensor_dim,
                     bias_enabled=bias_enabled)
             else:
                 lora = LoRALayerWeights.create_dummy_lora_weights(
                     module_name,
-                    module.lora_a_stacked.shape[-1],
-                    module.lora_b_stacked.shape[-2],
+                    module.lora_a_stacked[0].shape[-1],
+                    module.lora_b_stacked[0].shape[-2],
                     rank,
-                    module.lora_a_stacked.dtype,
+                    module.lora_a_stacked[0].dtype,
                     "cpu",
                     bias_enabled=bias_enabled,
                 )
diff --git a/vllm/lora/peft_helper.py b/vllm/lora/peft_helper.py
new file mode 100644
index 0000000000000..edf4ba5659575
--- /dev/null
+++ b/vllm/lora/peft_helper.py
@@ -0,0 +1,70 @@
+# Adapted from: https://github.com/huggingface/peft/blob/main/src/peft/tuners/lora/config.py
+
+import math
+from dataclasses import MISSING, dataclass, field, fields
+from typing import Literal, Optional, Union
+
+
+@dataclass
+class PEFTHelper:
+    # Required fields
+    r: int
+    lora_alpha: int
+    target_modules: Union[list[str], str]
+
+    bias: Literal["none", "all", "lora_only"] = field(default="none")
+    modules_to_save: Optional[list[str]] = field(default=None)
+    use_rslora: bool = field(default=False)
+    use_dora: bool = field(default=False)
+    # Long-context LoRA field
+    context_length: int = field(default=0)
+    # Extra vLLM fields, prefixed with 'vllm_' to avoid conflicts
+    vllm_max_position_embeddings: Optional[int] = field(default=None)
+    vllm_scaling_factor: Optional[float] = field(default=None)
+
+    def _validate_features(self):
+        error_msg = []
+
+        if self.modules_to_save:
+            error_msg.append("vLLM only supports modules_to_save being None.")
+        if self.use_rslora:
+            error_msg.append("vLLM does not yet support RSLoRA.")
+
+        if self.use_dora:
+            error_msg.append("vLLM does not yet support DoRA.")
+
+        if error_msg:
+            raise ValueError(f"{', '.join(error_msg)}")
+
+    def __post_init__(self):
+        self._validate_features()
+        if self.context_length:
+            if self.vllm_max_position_embeddings is None:
+                self.vllm_max_position_embeddings = self.context_length
+            self.vllm_scaling_factor = float(
+                math.ceil(self.context_length /
+                          self.vllm_max_position_embeddings))
+
+    @classmethod
+    def from_dict(cls, config_dict: dict) -> "PEFTHelper":
+        # Get all field information from the class
+        class_fields = {f.name: f for f in fields(cls)}
+        # Check for required fields
+        required_fields = {
+            name
+            for name, f in class_fields.items()
+            if f.default is MISSING and f.default_factory is MISSING
+        }
+
+        # Identify any missing required fields
+        missing_fields = required_fields - set(config_dict.keys())
+        if missing_fields:
+            raise ValueError(
+                f"Missing required configuration fields: {missing_fields}")
+
+        # Filter out fields that aren't defined in the class
+        filtered_dict = {
+            k: v
+            for k, v in config_dict.items() if k in class_fields
+        }
+        return cls(**filtered_dict)
diff --git a/vllm/lora/punica.py b/vllm/lora/punica.py
deleted file mode 100644
index 082041f390750..0000000000000
--- a/vllm/lora/punica.py
+++ /dev/null
@@ -1,611 +0,0 @@
-"""
-Based on:
-Chen, L., Ye, Z., Wu, Y., Zhuo, D., Ceze, L., & Krishnamurthy, A. (2023).
-Punica: Multi-Tenant LoRA Serving.
-https://arxiv.org/abs/2310.18547 -""" - -from typing import TYPE_CHECKING, Callable, List, Optional, Tuple, Union - -import torch - -from vllm.triton_utils import HAS_TRITON - -if HAS_TRITON: - from vllm.lora.ops.bgmv_expand import bgmv_expand - from vllm.lora.ops.bgmv_expand_slice import bgmv_expand_slice - from vllm.lora.ops.bgmv_shrink import bgmv_shrink - from vllm.lora.ops.sgmv_expand import sgmv_expand - from vllm.lora.ops.sgmv_expand_slice import sgmv_expand_slice - from vllm.lora.ops.sgmv_shrink import sgmv_shrink - -if TYPE_CHECKING: - # avoid circuit import - from vllm.lora.layers import LoRAMapping - from vllm.lora.models import LongContextLoRAContext - - -def compute_meta( - token_lora_tensor: torch.Tensor -) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int, int, int, bool]: - """ - Get the information required for the sgmv kernel. With the features: - 1. If consecutive requests in the batch use the same LoRA, this function - will combine them into a single request, improving sgmv kernel inference - performance. - 2. At the beginning of each prefill stage inference, recalculations are - needed based on the input, but only once. - """ - - lora_indices_tensor, seq_length_tensor = torch.unique_consecutive( - token_lora_tensor, return_counts=True) - cum_result = torch.cumsum(seq_length_tensor, dim=0) - b_seq_start_tensor = torch.zeros_like(seq_length_tensor) - b_seq_start_tensor[1:].copy_(cum_result[:-1]) - max_length = seq_length_tensor.max().item() - token_nums = seq_length_tensor.sum().item() - batch_size = lora_indices_tensor.size(0) - no_lora = False - # -1 means no lora should be applied. Use `no_lora` to determine whether - # the current step requires LoRA. If LoRA is not needed, the prefill stage - # does not need to launch the triton kernel, which can improve performance - if batch_size == 1 and lora_indices_tensor == -1: - no_lora = True - return (b_seq_start_tensor, seq_length_tensor, lora_indices_tensor, - batch_size, max_length, token_nums, no_lora) - - -# TODO see if this can be vectorized -def convert_mapping( - mapping: "LoRAMapping", - lora_index_to_id: List[Optional[int]], - max_loras: int, - vocab_size: int, - extra_vocab_size: int, - device: torch.device, - long_lora_context: Optional["LongContextLoRAContext"] = None, -) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, - Optional[torch.Tensor], List[int]]: - """Converts LoRAMapping to index tensors. - - Args: - mapping: LoRAMapping mapping rows in a batch to LoRA ids. - lora_index_to_id: List mapping LoRA ids to LoRA indices. - max_loras: Maximum number of LoRAs. - vocab_size: Model vocab size. - extra_vocab_size: Extra vocab size each LoRA can have. - long_lora_context: Passed if there are long context lora in a batch. - - Returns: - A tuple of tensors: - base_indices: Tensor of shape [batch_size] mapping batch rows to - LoRA indices. - sampler_indices: Tensor of shape [batch_size] mapping requests to - LoRA indices for sampler. For generation, this will be the - same as base_indicies. For prefill, this will map requests - to LoRA indices. - sampler_indices_padded: Tensor of shape [batch_size] mapping - requests to LoRA indices for sampler with padding. - Same as sampler_indicies, but -1 is replaced with - max_loras. - embeddings_indices: Tensor of shape [2, batch_size] mapping - requests to embedding indices. First row is for embeddings - added by the LoRAs, second row is for the LoRA.lora_a - embeddings. 
- long_lora_indices: Tensor of shape [batch_size] mapping - requests to RoPE offsets and rot dims for long LoRAs. - None if long context lora doesn't exist. - indices_len: List of lengths of the above tensors. It contains - (base_indices, sampler_indices, sampler_indices_padded, - embeddings_indices, long_lora_indices). - """ - index_mapping_indices: List[int] = list(mapping.index_mapping).copy() - embedding_indices = index_mapping_indices.copy() - lora_indices = index_mapping_indices.copy() - long_lora_offsets: Optional[torch.Tensor] = None - if long_lora_context: - long_lora_offsets = torch.zeros(len(index_mapping_indices), - device=device, - dtype=torch.long) - prompt_mapping: List[int] = [ - lora_index_to_id.index(x) if x > 0 else -1 - for x in mapping.prompt_mapping - ] - lora_idx = None - for i in range(len(index_mapping_indices)): - # TODO index can be slow. optimize - lora_idx = (lora_index_to_id.index(index_mapping_indices[i]) - if index_mapping_indices[i] > 0 else -1) - embedding_indices[i] = lora_idx if index_mapping_indices[i] > 0 else 0 - lora_indices[i] = lora_idx - if long_lora_context: - assert long_lora_offsets is not None - lora_offset: int = long_lora_context.offsets_by_lora_id.get( - index_mapping_indices[i], 0) - long_lora_offsets[i] = lora_offset - - indices_list: List[Union[List[int], torch.Tensor]] = [ - index_mapping_indices, - lora_indices, - embedding_indices, - ] - if long_lora_context: - assert long_lora_offsets is not None - indices_list.append(long_lora_offsets) - indices = torch.tensor(indices_list, dtype=torch.long, device=device) - prompt_mapping_tensor = torch.tensor(prompt_mapping, - dtype=torch.long, - device=device) - embeddings_indices = torch.stack([ - indices[2] * extra_vocab_size, - indices[2] * (vocab_size + extra_vocab_size), - ]) - embeddings_indices[embeddings_indices == -1] = max_loras - 1 - base_indices = indices[1] - sampler_indices = prompt_mapping_tensor - sampler_indices_padded = sampler_indices.clone() - sampler_indices_padded[sampler_indices_padded == -1] = max_loras - 1 - sampler_indices_padded = torch.arange( - 0, len(sampler_indices_padded), device=device, dtype=torch.long) + ( - sampler_indices_padded * len(sampler_indices_padded)) - long_lora_indices = None - long_lora_indices_len: Optional[int] = None - if long_lora_context: - long_lora_indices = indices[3] - long_lora_indices_len = long_lora_indices.shape[-1] - # Contain length of indices tensors. Used to index into each tensor. - indices_len = [ - base_indices.shape[-1], - sampler_indices.shape[-1], - sampler_indices_padded.shape[-1], - embeddings_indices.shape[-1], - ] - if long_lora_indices_len is not None: - indices_len.append(long_lora_indices_len) - else: - # If long_lora doesn't exist,append None - indices_len.append(None) - - return ( - base_indices, - sampler_indices, - sampler_indices_padded, - embeddings_indices, - long_lora_indices, - indices_len, - ) - - -class PunicaWrapper: - """ - PunicaWrapper is designed to manage and provide metadata for the punica - kernel. The main function is to maintain the state information for - Multi-LoRA, and to provide the interface for the punica kernel. 
- """ - - def __init__(self, max_num_batched_tokens: int, max_batches: int, - device: Union[torch.device, str]): - self._token_lora_indices = torch.empty(max_num_batched_tokens, - dtype=torch.long, - device=device) - self._sampler_indices = torch.empty(max_num_batched_tokens, - dtype=torch.long, - device=device) - self._sampler_indices_padded = torch.empty(max_num_batched_tokens, - dtype=torch.long, - device=device) - self._embeddings_indices = torch.empty(2, - max_num_batched_tokens, - dtype=torch.long, - device=device) - self._long_lora_indices = torch.empty(max_num_batched_tokens, - dtype=torch.long, - device=device) - - # 5 is the number of indicies tensors. - # base_indices, sampler_indices, sampler_indices_padded, - # embeddings_indices,long_lora_indices - self.indices_len: List[Optional[int]] = [None] * 5 - # these attributes are the information required for sgmv kernel - self._seq_start_locs = torch.empty(max_batches, - dtype=torch.long, - device=device) - self._seq_lengths = torch.empty(max_batches, - dtype=torch.long, - device=device) - self._lora_indices_per_batch = torch.empty(max_batches, - dtype=torch.long, - device=device) - self.device: torch.device = device - self.max_length: int = 0 - self.token_nums: int = 0 - self.batch_size: int = -1 - self.is_prefill = False - self.no_lora = False - - def update_metadata( - self, - mapping: "LoRAMapping", - lora_index_to_id: List[Optional[int]], - max_loras: int, - vocab_size: int, - extra_vocab_size: int, - long_lora_context: Optional["LongContextLoRAContext"] = None, - ): - - self._update_base_metadata(mapping, lora_index_to_id, max_loras, - vocab_size, extra_vocab_size, - long_lora_context) - if mapping.is_prefill: - # Update metadata required for prefill-related operators. - self._update_prefill_metada(self.token_lora_indices) - self.is_prefill = True - else: - self.is_prefill = False - - def _update_base_metadata( - self, - mapping: "LoRAMapping", - lora_index_to_id: List[Optional[int]], - max_loras: int, - vocab_size: int, - extra_vocab_size: int, - long_lora_context: Optional["LongContextLoRAContext"] = None, - ): - ( - base_indices, - sampler_indices, - sampler_indices_padded, - embeddings_indices, - long_lora_offsets_tensor, - indices_len, - ) = convert_mapping( - mapping, - lora_index_to_id, - max_loras, - vocab_size, - extra_vocab_size, - self.device, - long_lora_context, - ) - self._token_lora_indices[:base_indices.shape[0]].copy_(base_indices) - self._sampler_indices[:sampler_indices.shape[0]].copy_(sampler_indices) - self._sampler_indices_padded[:sampler_indices_padded.shape[0]].copy_( - sampler_indices_padded) - self._embeddings_indices[:embeddings_indices. 
- shape[0], :embeddings_indices.shape[1]].copy_( - embeddings_indices) - if long_lora_offsets_tensor is not None: - self._long_lora_indices[:long_lora_offsets_tensor.shape[0]].copy_( - long_lora_offsets_tensor) - else: - self._long_lora_indices.zero_() - self.indices_len[:] = indices_len - - def _update_prefill_metada(self, token_lora_tensor: torch.Tensor) -> None: - - (b_seq_start_tensor, seq_length_tensor, lora_indices_tensor, - batch_size, max_length, token_nums, - no_lora) = compute_meta(token_lora_tensor) - - self._seq_start_locs[:b_seq_start_tensor.shape[0]].copy_( - b_seq_start_tensor) - self._seq_lengths[:seq_length_tensor.shape[0]].copy_(seq_length_tensor) - self._lora_indices_per_batch[:lora_indices_tensor.shape[0]].copy_( - lora_indices_tensor) - self.batch_size = batch_size - self.max_length = max_length - self.token_nums = token_nums - self.no_lora = no_lora - - @property - def prefill_metadata( - self - ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int, int, int]: - """ - This property provides a convenient way to access the necessary - metadata for prefill-related kernel computations. - 1. seq_start_locs: Tensor of sequence start positions. - 2. seq_lengths: Tensor of sequence lengths. - 3. lora_indices_per_batch: Tensor of lora indices, and an index of - -1 means no lora should be applied. - 4. batch_size: Batch size after clustering identical lora indices. - 5. max_length: The maximum sequence length in the batch. - 6. token_nums: The token numbers in the batch. - """ - return (self._seq_start_locs[:self.batch_size], - self._seq_lengths[:self.batch_size], - self._lora_indices_per_batch[:self.batch_size], - self.batch_size, self.max_length, self.token_nums) - - @property - def token_lora_indices(self) -> torch.Tensor: - """ - This property provides the lora indices corresponding to each token - in the batch. An index of -1 means no lora should be applied. - """ - token_lora_len = self.indices_len[0] - return self._token_lora_indices[:token_lora_len] - - @property - def sampler_indices(self) -> torch.Tensor: - """ - This property is used to access the lora indices specifically for - LogitsProcessorWithLoRA. - """ - sampler_indices_len = self.indices_len[1] - return self._sampler_indices[:sampler_indices_len] - - @property - def sampler_indices_padded(self) -> torch.Tensor: - """ - This property provides access to padded sampler indices. - """ - indices_padded_len = self.indices_len[2] - return self._sampler_indices_padded[:indices_padded_len] - - @property - def embeddings_indices(self) -> torch.Tensor: - """ - This property provides access to the indices used for lora embeddings, - specifically for VocabParallelEmbeddingWithLoRA. - """ - embeddings_indices_len = self.indices_len[3] - return self._embeddings_indices[:, :embeddings_indices_len] - - @property - def long_lora_indices(self) -> torch.Tensor: - """ - This property provides access to the indices used for long context - lora, specifically for LinearScalingRotaryEmbeddingWithLora. 
- """ - long_lora_len = self.indices_len[4] - return self._long_lora_indices[:long_lora_len] - - def shrink_prefill( - self, - y: torch.Tensor, - x: torch.Tensor, - w_t_all: torch.Tensor, - scale: float, - ): - #No LoRA request, so return directly - if self.no_lora: - return - sgmv_shrink( - x, - w_t_all, - y, - *self.prefill_metadata, - scale, - ) - - def shrink_decode( - self, - y: torch.Tensor, - x: torch.Tensor, - w_t_all: torch.Tensor, - scale: float, - ): - bgmv_shrink(x, w_t_all, y, self.token_lora_indices, scale) - - def expand_prefill( - self, - y: torch.Tensor, - x: torch.Tensor, - w_t_all: torch.Tensor, - add_input: bool, - ): - #No LoRA request, so return directly - if self.no_lora: - return - sgmv_expand( - x, - w_t_all, - y, - *self.prefill_metadata, - add_input, - ) - - def expand_decode( - self, - y: torch.Tensor, - x: torch.Tensor, - w_t_all: torch.Tensor, - add_input: bool, - ): - bgmv_expand(x, w_t_all, y, self.token_lora_indices, add_input) - - def expand_slice_prefill( - self, - y: torch.Tensor, - x: torch.Tensor, - w_t_all: torch.Tensor, - y_offset: Optional[int], - y_slice_size: Optional[int], - add_input: bool, - ): - #No LoRA request, so return directly - if self.no_lora: - return - sgmv_expand_slice( - x, - w_t_all, - y, - *self.prefill_metadata, - y_offset, - y_slice_size, - add_input, - ) - - def expand_slice_decode( - self, - y: torch.Tensor, - x: torch.Tensor, - w_t_all: torch.Tensor, - y_offset: Optional[int], - y_slice_size: Optional[int], - add_input: bool, - ): - bgmv_expand_slice(x, w_t_all, y, self.token_lora_indices, y_offset, - y_slice_size, add_input) - - def add_shrink( - self, - y: torch.Tensor, - x: torch.Tensor, - w_t_all: torch.Tensor, - scale: float, - ): - """ - Perform the ` y+=x@w_t_all` computation, which is suitable for the - GEMM of lora'a. - When `is_prefill is` true, it indicates that it is currently the - prefill stage, and the `shrink_prefill` function should be called. - Otherwise, it is the decode stage, and the shrink_decode function - should be called. - """ - shrink_fun: Callable = (self.shrink_prefill - if self.is_prefill else self.shrink_decode) - shrink_fun(y, x, w_t_all, scale) - - def add_expand( - self, - y: torch.Tensor, - x: torch.Tensor, - w_t_all: torch.Tensor, - add_input: bool = True, - ): - """ - Perform the ` y+=x@w_t_all` computation, which is suitable for the - GEMM of lora'b. - When `is_prefill` is true, it indicates that it is currently the - prefill stage, and the `expand_prefill` function should be called. - Otherwise, it is the decode stage, and the expand_decode function - should be called. 
- """ - - expand_fun: Callable = (self.expand_prefill - if self.is_prefill else self.expand_decode) - expand_fun(y, x, w_t_all, add_input) - - def add_expand_slice(self, - y: torch.Tensor, - x: torch.Tensor, - w_t_all: torch.Tensor, - y_offset: Optional[int], - y_slice_size: Optional[int], - add_input: bool = True): - """ - Similar to `add_expand` - """ - - expand_slice_fun: Callable = (self.expand_slice_prefill - if self.is_prefill else - self.expand_slice_decode) - expand_slice_fun(y, x, w_t_all, y_offset, y_slice_size, add_input) - - def add_lora(self, - y: torch.Tensor, - x: torch.Tensor, - wa_t_all: torch.Tensor, - wb_t_all: torch.Tensor, - scale: float, - y_offset: Optional[int] = None, - y_slice_size: Optional[int] = None, - *, - buffer: Optional[torch.Tensor] = None) -> None: - """ - Semantics: - y[i] += ( - x[i].unsqueeze(0) - @ wa_t_all[indices[i], layer_idx, :, :].transpose(-1, -2) - @ wb_t_all[indices[i], layer_idx, :, :].transpose(-1, -2) - * scale - ).squeeze(0) - Args: - y (torch.Tensor): Output tensor. Will be changed in-place. - x (torch.Tensor): Input tensor - wa_t_all (torch.Tensor): lora_a's weight - wb_t_all (torch.Tensor): lora_b's weight - scale (float): Scaling factor. - y_offset (Optional[int], optional): Offset to apply to the starting - column of y. - y_slice_size (Optional[int], optional): Size of the y column slice. - buffer (Optional[torch.Tensor], optional): Defaults to None. - """ - y_org = y - y = y.view(-1, y.shape[-1]) - x = x.view(-1, x.shape[-1]) - r = wb_t_all.size(-1) - if buffer is None: - # We set the buffer to be float32 by default ,refer to: - # https://github.com/triton-lang/triton/issues/1387 - buffer = torch.zeros((x.size(0), r), - dtype=torch.float32, - device=x.device) - - self.add_shrink(buffer, x, wa_t_all, scale) - if y_offset is None and y_slice_size is None: - self.add_expand(y, buffer, wb_t_all, add_input=True) - else: - self.add_expand_slice(y, - buffer, - wb_t_all, - y_offset, - y_slice_size, - add_input=True) - y = y.view_as(y_org) - - def add_lora_packed_nslice(self, y: torch.Tensor, x: torch.Tensor, - lora_a_stacked: Tuple[torch.Tensor, - torch.Tensor, - torch.Tensor], - lora_b_stacked: Tuple[torch.Tensor, - torch.Tensor, - torch.Tensor], - scale: float, - output_slices: Tuple[int, ...]) -> None: - """ - Applies lora to each input. Similar to add_lora, This method is - used for layers that are composed of multiple sublayers - (slices) packed together. 
- """ - y_org = y - x = x.view(-1, x.shape[-1]) - y = y.view(-1, y.shape[-1]) - offset_left = 0 - # TODO fuse these kernels - for slice_idx in range(len(output_slices)): - self.add_lora(y, x, lora_a_stacked[slice_idx], - lora_b_stacked[slice_idx], scale, offset_left, - output_slices[slice_idx]) - offset_left += output_slices[slice_idx] - - y = y.view_as(y_org) - - def add_lora_logits(self, - y: torch.Tensor, - x: torch.Tensor, - wa_t_all: torch.Tensor, - wb_t_all: torch.Tensor, - scale, - *, - buffer: Optional[torch.Tensor] = None) -> None: - """ - LogitsProcessorWithLoRA always using bgmv - """ - y_org = y - y = y.view(-1, y.shape[-1]) - x = x.view(-1, x.shape[-1]) - r = wb_t_all.size(-1) - if buffer is None: - # We set the buffer to be float32 by default ,refer to: - # https://github.com/triton-lang/triton/issues/1387 - buffer = torch.zeros((x.size(0), r), - dtype=torch.float32, - device=x.device) - - bgmv_shrink(x, wa_t_all, buffer, self.sampler_indices, scale) - bgmv_expand(buffer, wb_t_all, y, self.sampler_indices, add_inputs=True) - y = y.view_as(y_org) diff --git a/vllm/lora/punica_wrapper/__init__.py b/vllm/lora/punica_wrapper/__init__.py new file mode 100644 index 0000000000000..48ada3926ea46 --- /dev/null +++ b/vllm/lora/punica_wrapper/__init__.py @@ -0,0 +1,7 @@ +from vllm.lora.punica_wrapper.punica_base import PunicaWrapperBase +from vllm.lora.punica_wrapper.punica_selector import get_punica_wrapper + +__all__ = [ + "PunicaWrapperBase", + "get_punica_wrapper", +] diff --git a/vllm/lora/punica_wrapper/punica_base.py b/vllm/lora/punica_wrapper/punica_base.py new file mode 100644 index 0000000000000..b9ec0c4bc6323 --- /dev/null +++ b/vllm/lora/punica_wrapper/punica_base.py @@ -0,0 +1,482 @@ +""" +Based on: +Chen, L., Ye, Z., Wu, Y., Zhuo, D., Ceze, L., & Krishnamurthy, A. (2023). +Punica: Multi-Tenant LoRA Serving. +https://arxiv.org/abs/2310.18547 +""" + +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, List, Optional, Tuple, Union + +import torch + +from .utils import compute_meta, convert_mapping + +if TYPE_CHECKING: + # avoid circuit import + from vllm.lora.layers import LoRAMapping + from vllm.lora.models import LongContextLoRAContext + + +class PunicaWrapperABC(ABC): + """ + PunicaWrapper ABC. + """ + + @abstractmethod + def update_metadata( + self, + mapping: "LoRAMapping", + lora_index_to_id: List[Optional[int]], + max_loras: int, + vocab_size: int, + extra_vocab_size: int, + long_lora_context: Optional["LongContextLoRAContext"] = None, + **kwargs, + ) -> None: + """ + Update the lora-related metadata + """ + raise NotImplementedError + + @abstractmethod + def add_shrink( + self, + y: Union[Tuple[torch.Tensor, ...], torch.Tensor], + x: torch.Tensor, + lora_a_stacked: Tuple[torch.Tensor, ...], + scale: float, + **kwargs, + ) -> None: + """ + Performs GEMM for multiple slices of lora_a. + """ + + raise NotImplementedError + + @abstractmethod + def add_expand( + self, + y: torch.Tensor, + x: Union[Tuple[torch.Tensor, ...], torch.Tensor], + lora_b_stacked: Tuple[torch.Tensor, ...], + lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]], + output_slices: Tuple[int, ...], + offset_start: int = 0, + add_inputs=True, + **kwargs, + ) -> None: + """ + Performs GEMM and bias addition for multiple slices of lora_b. 
+ """ + raise NotImplementedError + + @abstractmethod + def add_lora_embedding( + self, + y: torch.Tensor, + x: torch.Tensor, + lora_b_stacked: torch.Tensor, + add_inputs: bool = True, + **kwargs, + ) -> None: + """ + Applies lora specifically for VocabParallelEmbeddingWithLoRA, + and this layer only requires the expand operation. + """ + raise NotImplementedError + + @abstractmethod + def add_lora_linear(self, + y: torch.Tensor, + x: torch.Tensor, + lora_a_stacked: Tuple[torch.Tensor, ...], + lora_b_stacked: Tuple[torch.Tensor, ...], + lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]], + scale: float, + output_slices: Tuple[int, ...], + *, + buffer: Optional[Tuple[torch.Tensor, ...]] = None, + **kwargs) -> None: + """ + Applicable to linear-related lora. + """ + + raise NotImplementedError + + @abstractmethod + def add_lora_logits(self, + y: torch.Tensor, + x: torch.Tensor, + lora_a_stacked: torch.Tensor, + lora_b_stacked: torch.Tensor, + scale, + *, + buffer: Optional[torch.Tensor] = None, + **kwargs) -> None: + """ + Applies lora specifically for LogitsProcessorWithLoRA. + """ + raise NotImplementedError + + +class PunicaWrapperBase(PunicaWrapperABC): + """ + PunicaWrapperBase is designed to manage and provide metadata for the punica + kernel. The main function is to maintain the state information for + Multi-LoRA, and to provide the interface for the punica. + """ + + def __init__(self, max_num_batched_tokens: int, max_batches: int, + device: Union[torch.device, str], **kwargs): + self._token_lora_indices = torch.empty(max_num_batched_tokens, + dtype=torch.long, + device=device) + self._sampler_indices = torch.empty(max_num_batched_tokens, + dtype=torch.long, + device=device) + self._sampler_indices_padded = torch.empty(max_num_batched_tokens, + dtype=torch.long, + device=device) + self._embeddings_indices = torch.empty(2, + max_num_batched_tokens, + dtype=torch.long, + device=device) + self._long_lora_indices = torch.empty(max_num_batched_tokens, + dtype=torch.long, + device=device) + + # 5 is the number of indicies tensors. + # base_indices, sampler_indices, sampler_indices_padded, + # embeddings_indices,long_lora_indices + self.indices_len: List[Optional[int]] = [None] * 5 + # these attributes are the information required for sgmv kernel + self._seq_start_locs = torch.empty(max_batches, + dtype=torch.long, + device=device) + self._seq_lengths = torch.empty(max_batches, + dtype=torch.long, + device=device) + self._lora_indices_per_batch = torch.empty(max_batches, + dtype=torch.long, + device=device) + self.device: torch.device = device + self.max_length: int = 0 + self.token_nums: int = 0 + self.batch_size: int = -1 + self.is_prefill = False + self.no_lora = False + + def _update_base_metadata( + self, + mapping: "LoRAMapping", + lora_index_to_id: List[Optional[int]], + max_loras: int, + vocab_size: int, + extra_vocab_size: int, + long_lora_context: Optional["LongContextLoRAContext"] = None, + ): + ( + base_indices, + sampler_indices, + sampler_indices_padded, + embeddings_indices, + long_lora_offsets_tensor, + indices_len, + ) = convert_mapping( + mapping, + lora_index_to_id, + max_loras, + vocab_size, + extra_vocab_size, + self.device, + long_lora_context, + ) + self._token_lora_indices[:base_indices.shape[0]].copy_(base_indices) + self._sampler_indices[:sampler_indices.shape[0]].copy_(sampler_indices) + self._sampler_indices_padded[:sampler_indices_padded.shape[0]].copy_( + sampler_indices_padded) + self._embeddings_indices[:embeddings_indices. 
+ shape[0], :embeddings_indices.shape[1]].copy_( + embeddings_indices) + if long_lora_offsets_tensor is not None: + self._long_lora_indices[:long_lora_offsets_tensor.shape[0]].copy_( + long_lora_offsets_tensor) + else: + self._long_lora_indices.zero_() + self.indices_len[:] = indices_len + + def _update_prefill_metada(self, token_lora_tensor: torch.Tensor) -> None: + + (b_seq_start_tensor, seq_length_tensor, lora_indices_tensor, + batch_size, max_length, token_nums, + no_lora) = compute_meta(token_lora_tensor) + + self._seq_start_locs[:b_seq_start_tensor.shape[0]].copy_( + b_seq_start_tensor) + self._seq_lengths[:seq_length_tensor.shape[0]].copy_(seq_length_tensor) + self._lora_indices_per_batch[:lora_indices_tensor.shape[0]].copy_( + lora_indices_tensor) + self.batch_size = batch_size + self.max_length = max_length + self.token_nums = token_nums + self.no_lora = no_lora + + def _apply_bias( + self, + indices: torch.Tensor, + output: torch.Tensor, + output_slices: Tuple[int, ...], + lora_bias_stacked: Tuple[Optional[torch.Tensor], ...], + ): + """Applies bias to output + + Input shapes: + lora_bias_stacked: 3 element tuple of (num_loras, output_dim) + indices: (batch_size) + output: (batch_size, q_slice_size + 2*kv_slice_size) + output_slices: n-1 element tuple of (slice_size...), + where n is number of slices + """ + org_output = output + output = output.view(-1, output.shape[-1]) + indices = indices.view(-1) + + offset_left = 0 + for slice_idx, slice in enumerate(output_slices): + bias = lora_bias_stacked[slice_idx] + if bias is not None: + bias = bias.view(-1, bias.shape[-1]) + bias = bias[indices] + bias[indices == -1] = 0 + output[:, offset_left:offset_left + slice] += bias + offset_left += slice + + return output.view_as(org_output) + + @property + def prefill_metadata( + self + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int, int, int]: + """ + This property provides a convenient way to access the necessary + metadata for prefill-related kernel computations. + 1. seq_start_locs: Tensor of sequence start positions. + 2. seq_lengths: Tensor of sequence lengths. + 3. lora_indices_per_batch: Tensor of lora indices, and an index of + -1 means no lora should be applied. + 4. batch_size: Batch size after clustering identical lora indices. + 5. max_length: The maximum sequence length in the batch. + 6. token_nums: The token numbers in the batch. + """ + return (self._seq_start_locs[:self.batch_size], + self._seq_lengths[:self.batch_size], + self._lora_indices_per_batch[:self.batch_size], + self.batch_size, self.max_length, self.token_nums) + + @property + def token_lora_indices(self) -> torch.Tensor: + """ + This property provides the lora indices corresponding to each token + in the batch. An index of -1 means no lora should be applied. + """ + token_lora_len = self.indices_len[0] + return self._token_lora_indices[:token_lora_len] + + @property + def sampler_indices(self) -> torch.Tensor: + """ + This property is used to access the lora indices specifically for + LogitsProcessorWithLoRA. + """ + sampler_indices_len = self.indices_len[1] + return self._sampler_indices[:sampler_indices_len] + + @property + def sampler_indices_padded(self) -> torch.Tensor: + """ + This property provides access to padded sampler indices. 
+ """ + indices_padded_len = self.indices_len[2] + return self._sampler_indices_padded[:indices_padded_len] + + @property + def embeddings_indices(self) -> torch.Tensor: + """ + This property provides access to the indices used for lora embeddings, + specifically for VocabParallelEmbeddingWithLoRA. + """ + embeddings_indices_len = self.indices_len[3] + return self._embeddings_indices[:, :embeddings_indices_len] + + @property + def long_lora_indices(self) -> torch.Tensor: + """ + This property provides access to the indices used for long context + lora, specifically for LinearScalingRotaryEmbeddingWithLora. + """ + long_lora_len = self.indices_len[4] + return self._long_lora_indices[:long_lora_len] + + def update_metadata( + self, + mapping: "LoRAMapping", + lora_index_to_id: List[Optional[int]], + max_loras: int, + vocab_size: int, + extra_vocab_size: int, + long_lora_context: Optional["LongContextLoRAContext"] = None, + **kwargs): + + self._update_base_metadata(mapping, lora_index_to_id, max_loras, + vocab_size, extra_vocab_size, + long_lora_context) + if mapping.is_prefill: + # Update metadata required for prefill-related operators. + self._update_prefill_metada(self.token_lora_indices) + self.is_prefill = True + else: + self.is_prefill = False + + @abstractmethod + def add_shrink(self, y: Union[Tuple[torch.Tensor, ...], torch.Tensor], + x: torch.Tensor, lora_a_stacked: Tuple[torch.Tensor, ...], + scale: float, **kwargs) -> None: + """ + Performs GEMM for multiple slices of lora_a. + + Semantics: + for i in range(len(lora_a_stacked)): + y[i] += (x @ lora_a_stacked[i]) * scale + + Args: + y (Union[Tuple[torch.Tensor, ...], torch.Tensor]): Output tensors + x (torch.Tensor): Input tensor + lora_a_stacked (Tuple[torch.Tensor, ...]): lora_a's weights + scale (float): Scaling factor for the operation + + """ + # TODO: implement it based on torch ops + raise NotImplementedError + + @abstractmethod + def add_expand(self, + y: torch.Tensor, + x: Union[Tuple[torch.Tensor, ...], torch.Tensor], + lora_b_stacked: Tuple[torch.Tensor, ...], + lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]], + output_slices: Tuple[int, ...], + offset_start: int = 0, + add_inputs=True, + **kwargs) -> None: + """ + Performs GEMM and bias addition for multiple slices of lora_b. + + Semantics: + offset = offset_start + for i in range(len(lora_b_stacked)): + slice = output_slices[i] + y[:, offset:offset+slice] += x[i] @ lora_b_stacked[i] + + lora_bias_stacked[i] + offset += slice + + Args: + y (torch.Tensor): Output tensor. + x (Union[Tuple[torch.Tensor, ...], torch.Tensor]): Input tensors + lora_b_stacked (Tuple[torch.Tensor, ...]): lora_b's weight + lora_bias_stacked (Optional[Tuple[torch.Tensor, ...]]): + bias's weight + output_slices (Tuple[int, ...]): Every slice's size + offset_start (int): The starting position of y, defaults to 0 + add_inputs (bool): Defaults to True. + + """ + # TODO: implement it based on torch ops + raise NotImplementedError + + @abstractmethod + def add_lora_embedding(self, + y: torch.Tensor, + x: torch.Tensor, + lora_b_stacked: torch.Tensor, + add_inputs: bool = True, + **kwargs) -> None: + """ + Applies lora specifically for VocabParallelEmbeddingWithLoRA. + and this layer only requires the expand operation. + Semantics: + y += x @ lora_b_stacked + + Args: + y (torch.Tensor): Output tensor. + x (torch.Tensor): Input tensor. + lora_b_stacked (torch.Tensor): lora_b's weights. + add_inputs (bool): Default to True. 
+ """ + # TODO: implement it based on torch ops + raise NotImplementedError + + @abstractmethod + def add_lora_linear(self, + y: torch.Tensor, + x: torch.Tensor, + lora_a_stacked: Tuple[torch.Tensor, ...], + lora_b_stacked: Tuple[torch.Tensor, ...], + lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]], + scale: float, + output_slices: Tuple[int, ...], + *, + buffer: Optional[Tuple[torch.Tensor, ...]] = None, + **kwargs) -> None: + """ + Applicable to linear-related lora. + + Semantics: + for i in range(len(lora_a_stacked)): + y[i] += ( + x[i].unsqueeze(0) + @ lora_a_stacked[indices[i], layer_idx, :, :] + @ lora_b_stacked[indices[i], layer_idx, :, :] + * scale + ).squeeze(0)+lora_bias_stacked[i] + + Args: + y (torch.Tensor): Output tensor. Will be changed in-place. + x (torch.Tensor): Input tensor + lora_a_stacked (Tuple[torch.Tensor, ...]): lora_a's weight. + lora_b_stacked (Tuple[torch.Tensor, ...]): lora_b's weight. + lora_bias_stacked (Optional[Tuple[torch.Tensor, ...]]): lora's bias. + scale (float): Scaling factor. + output_slices (Tuple[int, ...]): Every slice's size. + buffer (Optional[Tuple[torch.Tensor, ...]]): Defaults to None. + """ + # TODO: implement it based on torch ops + raise NotImplementedError + + @abstractmethod + def add_lora_logits(self, + y: torch.Tensor, + x: torch.Tensor, + lora_a_stacked: torch.Tensor, + lora_b_stacked: torch.Tensor, + scale, + *, + buffer: Optional[torch.Tensor] = None, + **kwargs) -> None: + """ + Applies lora specifically for LogitsProcessorWithLoRA. + + Semantics: + buffer = (x @ lora_a_stacked) * scale + y += buffer @ lora_b_stacked + + Args: + y (torch.Tensor): Output tensor. + x (torch.Tensor): Input tensor. + lora_a_stacked (torch.Tensor): lora_a's weights. + lora_b_stacked (torch.Tensor):lora_b's weights. + scale (float): Scaling factor. + buffer (Optional[torch.Tensor]):Default to None. + """ + # TODO: implement it based on torch ops + raise NotImplementedError diff --git a/vllm/lora/punica_wrapper/punica_gpu.py b/vllm/lora/punica_wrapper/punica_gpu.py new file mode 100644 index 0000000000000..de378df8b3cfa --- /dev/null +++ b/vllm/lora/punica_wrapper/punica_gpu.py @@ -0,0 +1,358 @@ +""" +Based on: +Chen, L., Ye, Z., Wu, Y., Zhuo, D., Ceze, L., & Krishnamurthy, A. (2023). +Punica: Multi-Tenant LoRA Serving. +https://arxiv.org/abs/2310.18547 +""" + +from typing import Callable, Optional, Tuple, Union, final + +import torch + +from vllm.triton_utils import HAS_TRITON + +if HAS_TRITON: + from vllm.lora.ops.bgmv_expand import bgmv_expand + from vllm.lora.ops.bgmv_expand_slice import bgmv_expand_slice + from vllm.lora.ops.bgmv_shrink import bgmv_shrink + from vllm.lora.ops.sgmv_expand import sgmv_expand + from vllm.lora.ops.sgmv_expand_slice import sgmv_expand_slice + from vllm.lora.ops.sgmv_shrink import sgmv_shrink + +from .punica_base import PunicaWrapperBase + + +@final +class PunicaWrapperGPU(PunicaWrapperBase): + """ + PunicaWrapperGPU is designed to manage and provide metadata for the punica + kernel. The main function is to maintain the state information for + Multi-LoRA, and to provide the interface for the punica triton kernel. 
diff --git a/vllm/lora/punica_wrapper/punica_gpu.py b/vllm/lora/punica_wrapper/punica_gpu.py
new file mode 100644
index 0000000000000..de378df8b3cfa
--- /dev/null
+++ b/vllm/lora/punica_wrapper/punica_gpu.py
@@ -0,0 +1,358 @@
+"""
+Based on:
+Chen, L., Ye, Z., Wu, Y., Zhuo, D., Ceze, L., & Krishnamurthy, A. (2023).
+Punica: Multi-Tenant LoRA Serving.
+https://arxiv.org/abs/2310.18547
+"""
+
+from typing import Callable, Optional, Tuple, Union, final
+
+import torch
+
+from vllm.triton_utils import HAS_TRITON
+
+if HAS_TRITON:
+    from vllm.lora.ops.bgmv_expand import bgmv_expand
+    from vllm.lora.ops.bgmv_expand_slice import bgmv_expand_slice
+    from vllm.lora.ops.bgmv_shrink import bgmv_shrink
+    from vllm.lora.ops.sgmv_expand import sgmv_expand
+    from vllm.lora.ops.sgmv_expand_slice import sgmv_expand_slice
+    from vllm.lora.ops.sgmv_shrink import sgmv_shrink
+
+from .punica_base import PunicaWrapperBase
+
+
+@final
+class PunicaWrapperGPU(PunicaWrapperBase):
+    """
+    PunicaWrapperGPU is designed to manage and provide metadata for the punica
+    kernels. Its main function is to maintain the state information for
+    Multi-LoRA and to provide the interface for the punica Triton kernels.
+    """
+
+    def __init__(self, max_num_batched_tokens: int, max_batches: int,
+                 device: Union[torch.device, str], **kwargs):
+        PunicaWrapperBase.__init__(self, max_num_batched_tokens, max_batches,
+                                   device)
+
+    def _shrink_prefill(
+        self,
+        y: torch.Tensor,
+        x: torch.Tensor,
+        w_t_all: torch.Tensor,
+        scale: float,
+    ):
+        # No LoRA request, so return directly
+        if self.no_lora:
+            return
+        sgmv_shrink(
+            x,
+            w_t_all,
+            y,
+            *self.prefill_metadata,
+            scale,
+        )
+
+    def _shrink_decode(
+        self,
+        y: torch.Tensor,
+        x: torch.Tensor,
+        w_t_all: torch.Tensor,
+        scale: float,
+    ):
+        bgmv_shrink(x, w_t_all, y, self.token_lora_indices, scale)
+
+    def _expand_prefill(
+        self,
+        y: torch.Tensor,
+        x: torch.Tensor,
+        w_t_all: torch.Tensor,
+        add_inputs: bool,
+    ):
+        # No LoRA request, so return directly
+        if self.no_lora:
+            return
+        sgmv_expand(
+            x,
+            w_t_all,
+            y,
+            *self.prefill_metadata,
+            add_inputs,
+        )
+
+    def _expand_decode(
+        self,
+        y: torch.Tensor,
+        x: torch.Tensor,
+        w_t_all: torch.Tensor,
+        add_inputs: bool,
+    ):
+        bgmv_expand(x, w_t_all, y, self.token_lora_indices, add_inputs)
+
+    def _expand_slice_prefill(
+        self,
+        y: torch.Tensor,
+        x: torch.Tensor,
+        w_t_all: torch.Tensor,
+        y_offset: Optional[int],
+        y_slice_size: Optional[int],
+        add_inputs: bool,
+    ):
+        # No LoRA request, so return directly
+        if self.no_lora:
+            return
+        sgmv_expand_slice(
+            x,
+            w_t_all,
+            y,
+            *self.prefill_metadata,
+            y_offset,
+            y_slice_size,
+            add_inputs,
+        )
+
+    def _expand_slice_decode(
+        self,
+        y: torch.Tensor,
+        x: torch.Tensor,
+        w_t_all: torch.Tensor,
+        y_offset: Optional[int],
+        y_slice_size: Optional[int],
+        add_inputs: bool,
+    ):
+        bgmv_expand_slice(x, w_t_all, y, self.token_lora_indices, y_offset,
+                          y_slice_size, add_inputs)
+
+    def _apply_expand(
+        self,
+        y: torch.Tensor,
+        x: torch.Tensor,
+        w_t_all: torch.Tensor,
+        y_offset: Optional[int],
+        y_slice_size: Optional[int],
+        add_inputs: bool = True,
+    ):
+        """
+        Perform the `y[:, y_offset:y_offset+y_slice_size] += x @ w_t_all`
+        computation, which is suitable for the GEMM of lora_b.
+        """
+
+        expand_slice_fun: Callable = (self._expand_slice_prefill
+                                      if self.is_prefill else
+                                      self._expand_slice_decode)
+        expand_slice_fun(y, x, w_t_all, y_offset, y_slice_size, add_inputs)
+
+    def _apply_shrink(self, y: torch.Tensor, x: torch.Tensor,
+                      w_t_all: torch.Tensor, scale: float):
+        """
+        Perform the `y += x @ w_t_all` computation, which is suitable for the
+        GEMM of lora_a.
+        When `is_prefill` is true, it indicates that it is currently the
+        prefill stage, and the `_shrink_prefill` function should be called.
+        Otherwise, it is the decode stage, and the `_shrink_decode` function
+        should be called.
+        """
+        y_org = y
+        y = y.view(-1, y.shape[-1])
+        shrink_fun: Callable = (self._shrink_prefill
+                                if self.is_prefill else self._shrink_decode)
+        shrink_fun(y, x, w_t_all, scale)
+        y = y.view_as(y_org)
+
+    def add_shrink(self, y: Union[Tuple[torch.Tensor, ...], torch.Tensor],
+                   x: torch.Tensor, lora_a_stacked: Tuple[torch.Tensor, ...],
+                   scale: float, **kwargs):
+        """
+        Performs GEMM for multiple slices of lora_a.
+        When `is_prefill` is true, it indicates that it is currently the
+        prefill stage, and the `_shrink_prefill` function should be called.
+        Otherwise, it is the decode stage, and the `_shrink_decode` function
+        should be called.
+
+        Semantics:
+            for i in range(len(lora_a_stacked)):
+                y[i] += (x @ lora_a_stacked[i]) * scale
+
+        Args:
+            y (Union[Tuple[torch.Tensor, ...], torch.Tensor]): Output tensors.
+            x (torch.Tensor): Input tensor.
+            lora_a_stacked (Tuple[torch.Tensor, ...]): lora_a's weights.
+            scale (float): Scaling factor for the operation.
+        """
+
+        x = x.view(-1, x.shape[-1])
+        # TODO: fuse these kernels
+        for slice_idx in range(len(lora_a_stacked)):
+            self._apply_shrink(y[slice_idx], x, lora_a_stacked[slice_idx],
+                               scale)
+
+    def add_expand(self,
+                   y: torch.Tensor,
+                   x: Union[Tuple[torch.Tensor, ...], torch.Tensor],
+                   lora_b_stacked: Tuple[torch.Tensor, ...],
+                   lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]],
+                   output_slices: Tuple[int, ...],
+                   offset_start: int = 0,
+                   add_inputs=True,
+                   **kwargs) -> None:
+        """
+        Performs GEMM and bias addition for multiple slices of lora_b.
+
+        Semantics:
+            for i in range(len(lora_b_stacked)):
+                slice = output_slices[i]
+                y[:, offset:offset+slice] += x[i] @ lora_b_stacked[i] +
+                    lora_bias_stacked[i]
+                offset += slice
+
+        Args:
+            y (torch.Tensor): Output tensor.
+            x (Union[Tuple[torch.Tensor, ...], torch.Tensor]): Input tensors.
+            lora_b_stacked (Tuple[torch.Tensor, ...]): lora_b's weights.
+            lora_bias_stacked (Optional[Tuple[torch.Tensor, ...]]):
+                Stacked bias weights.
+            output_slices (Tuple[int, ...]): Every slice's size.
+            add_inputs (bool): Defaults to True.
+        """
+        y_org = y
+        y = y.view(-1, y.shape[-1])
+        offset_left = offset_start
+        if lora_bias_stacked is not None:
+            self._apply_bias(self.token_lora_indices, y, output_slices,
+                             lora_bias_stacked)
+        for slice_idx in range(len(lora_b_stacked)):
+            self._apply_expand(
+                y,
+                x[slice_idx],
+                lora_b_stacked[slice_idx],
+                offset_left,
+                output_slices[slice_idx],
+                add_inputs=add_inputs,
+            )
+            offset_left += output_slices[slice_idx]
+        y = y.view_as(y_org)
+
+    def add_lora_embedding(self,
+                           y: torch.Tensor,
+                           x: torch.Tensor,
+                           lora_b_stacked: torch.Tensor,
+                           add_inputs: bool = True,
+                           **kwargs) -> None:
+        """
+        Applies LoRA specifically for VocabParallelEmbeddingWithLoRA.
+
+        Semantics:
+            y += x @ lora_b_stacked
+
+        Args:
+            y (torch.Tensor): Output tensor.
+            x (torch.Tensor): Input tensor.
+            lora_b_stacked (torch.Tensor): lora_b's weights.
+            add_inputs (bool): Defaults to True.
+        """
+
+        # The embedding layer only needs the expand op
+        expand_fun: Callable = (self._expand_prefill
+                                if self.is_prefill else self._expand_decode)
+        expand_fun(y, x, lora_b_stacked, add_inputs)
+
+    def add_lora_linear(self,
+                        y: torch.Tensor,
+                        x: torch.Tensor,
+                        lora_a_stacked: Tuple[torch.Tensor, ...],
+                        lora_b_stacked: Tuple[torch.Tensor, ...],
+                        lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]],
+                        scale: float,
+                        output_slices: Tuple[int, ...],
+                        *,
+                        buffer: Optional[Tuple[torch.Tensor, ...]] = None,
+                        **kwargs) -> None:
+        """
+        Applies LoRA to linear layers.
+
+        Semantics:
+            for i in range(len(lora_a_stacked)):
+                y[i] += (
+                    x[i].unsqueeze(0)
+                    @ lora_a_stacked[indices[i], layer_idx, :, :]
+                    @ lora_b_stacked[indices[i], layer_idx, :, :]
+                    * scale
+                    ).squeeze(0) + lora_bias_stacked[i]
+
+        Args:
+            y (torch.Tensor): Output tensor. Will be changed in-place.
+            x (torch.Tensor): Input tensor.
+            lora_a_stacked (Tuple[torch.Tensor, ...]): lora_a's weights.
+            lora_b_stacked (Tuple[torch.Tensor, ...]): lora_b's weights.
+            lora_bias_stacked (Optional[Tuple[torch.Tensor, ...]]): LoRA bias
+                weights.
+            scale (float): Scaling factor.
+            output_slices (Tuple[int, ...]): Every slice's size.
+            buffer (Optional[Tuple[torch.Tensor, ...]]): Defaults to None.
+        """
+
+        assert len(lora_a_stacked) == len(lora_b_stacked) == len(output_slices)
+        if lora_bias_stacked is not None:
+            assert len(lora_bias_stacked) == len(output_slices)
+            y = self._apply_bias(self.token_lora_indices, y, output_slices,
+                                 lora_bias_stacked)
+
+        if buffer is None:
+            r = lora_b_stacked[0].size(-1)
+            # We set the buffer to float32 by default; refer to:
+            # https://github.com/triton-lang/triton/issues/1387
+            buffer = tuple(
+                torch.zeros(
+                    (x.size(0), r), dtype=torch.float32, device=x.device)
+                for _ in range(len(output_slices)))
+        self.add_shrink(buffer, x, lora_a_stacked, scale, **kwargs)
+        self.add_expand(y,
+                        buffer,
+                        lora_b_stacked,
+                        None,
+                        output_slices,
+                        add_inputs=True,
+                        **kwargs)
+
+    def add_lora_logits(self,
+                        y: torch.Tensor,
+                        x: torch.Tensor,
+                        lora_a_stacked: torch.Tensor,
+                        lora_b_stacked: torch.Tensor,
+                        scale,
+                        *,
+                        buffer: Optional[torch.Tensor] = None,
+                        **kwargs) -> None:
+        """
+        Applies LoRA specifically for LogitsProcessorWithLoRA.
+
+        Semantics:
+            buffer = (x @ lora_a_stacked) * scale
+            y += buffer @ lora_b_stacked
+
+        Args:
+            y (torch.Tensor): Output tensor.
+            x (torch.Tensor): Input tensor.
+            lora_a_stacked (torch.Tensor): lora_a's weights.
+            lora_b_stacked (torch.Tensor): lora_b's weights.
+            scale (float): Scaling factor.
+            buffer (Optional[torch.Tensor]): Defaults to None.
+        """
+        y_org = y
+        y = y.view(-1, y.shape[-1])
+        x = x.view(-1, x.shape[-1])
+        r = lora_b_stacked.size(-1)
+        if buffer is None:
+            # We set the buffer to float32 by default; refer to:
+            # https://github.com/triton-lang/triton/issues/1387
+            buffer = torch.zeros((x.size(0), r),
+                                 dtype=torch.float32,
+                                 device=x.device)
+        # LogitsProcessorWithLoRA always uses bgmv.
+        bgmv_shrink(x, lora_a_stacked, buffer, self.sampler_indices, scale)
+        bgmv_expand(buffer,
+                    lora_b_stacked,
+                    y,
+                    self.sampler_indices,
+                    add_inputs=True)
+        y = y.view_as(y_org)
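
For packed layers such as qkv_proj, `output_slices` records the width of each sub-projection, and `add_expand` writes each LoRA product into its own column range of the packed output. A minimal sketch of that slice bookkeeping (the shapes here are illustrative assumptions, not values from the patch):

    import torch

    y = torch.zeros(8, 96)             # packed output: q|k|v, 32 cols each
    output_slices = (32, 32, 32)
    xs = [torch.randn(8, 16) for _ in output_slices]   # shrink outputs, rank 16
    bs = [torch.randn(32, 16) for _ in output_slices]  # one lora_b per slice

    offset = 0
    for x_i, b_i, width in zip(xs, bs, output_slices):
        # each slice accumulates into its own column range of y
        y[:, offset:offset + width] += x_i @ b_i.T
        offset += width
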
diff --git a/vllm/lora/punica_wrapper/punica_hpu.py b/vllm/lora/punica_wrapper/punica_hpu.py
new file mode 100644
index 0000000000000..d9c4f44a1c282
--- /dev/null
+++ b/vllm/lora/punica_wrapper/punica_hpu.py
@@ -0,0 +1,87 @@
+from typing import Optional, Tuple, Union, final
+
+import torch
+from vllm_hpu_extension.ops import (dispatch_bgmv_embedding,
+                                    dispatch_bgmv_linear)
+
+from .punica_base import PunicaWrapperBase
+
+
+@final
+class PunicaWrapperHPU(PunicaWrapperBase):
+
+    def __init__(self, max_num_batched_tokens: int, max_batches: int,
+                 device: Union[torch.device, str], **kwargs):
+        # Increase max_num_batched_tokens by 3x to handle the increase in
+        # tensor size due to padding.
+        PunicaWrapperBase.__init__(self, 3 * max_num_batched_tokens,
+                                   max_batches, device)
+
+    def add_lora_embedding(self,
+                           y: torch.Tensor,
+                           x: torch.Tensor,
+                           lora_b_stacked: torch.Tensor,
+                           add_inputs: bool = True,
+                           **kwargs) -> None:
+        dispatch_bgmv_embedding(y, x, lora_b_stacked, 0)
+
+    def add_lora_linear(self,
+                        y: torch.Tensor,
+                        x: torch.Tensor,
+                        lora_a_stacked: Tuple[torch.Tensor, ...],
+                        lora_b_stacked: Tuple[torch.Tensor, ...],
+                        lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]],
+                        scale: float,
+                        output_slices: Tuple[int, ...],
+                        *,
+                        buffer: Optional[Tuple[torch.Tensor, ...]] = None,
+                        **kwargs) -> None:
+        y_org = y
+        x = x.view(-1, x.shape[-1])
+        y = y.view(-1, y.shape[-1])
+        offset_left = 0
+
+        for slice_idx in range(len(output_slices)):
+            dispatch_bgmv_linear(
+                y[:, offset_left:offset_left + output_slices[slice_idx]], x,
+                lora_a_stacked[slice_idx], lora_b_stacked[slice_idx], 0, scale)
+            offset_left += output_slices[slice_idx]
+        y = y.view_as(y_org)
+
+    def add_lora_logits(self,
+                        y: torch.Tensor,
+                        x: torch.Tensor,
+                        lora_a_stacked: torch.Tensor,
+                        lora_b_stacked: torch.Tensor,
+                        scale,
+                        *,
+                        buffer: Optional[torch.Tensor] = None,
+                        **kwargs) -> None:
+        y_org = y
+        y = y.view(-1, y.shape[-1])
+        x = x.view(-1, x.shape[-1])
+        dispatch_bgmv_linear(y, x, lora_a_stacked, lora_b_stacked, 0, scale)
+        y = y.view_as(y_org)
+
+    def add_shrink(
+        self,
+        y: Union[Tuple[torch.Tensor, ...], torch.Tensor],
+        x: torch.Tensor,
+        lora_a_stacked: Tuple[torch.Tensor, ...],
+        scale: float,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
+
+    def add_expand(
+        self,
+        y: torch.Tensor,
+        x: Union[Tuple[torch.Tensor, ...], torch.Tensor],
+        lora_b_stacked: Tuple[torch.Tensor, ...],
+        lora_bias_stacked: Optional[Tuple[torch.Tensor, ...]],
+        output_slices: Tuple[int, ...],
+        offset_start: int = 0,
+        add_inputs=True,
+        **kwargs,
+    ) -> None:
+        raise NotImplementedError
diff --git a/vllm/lora/punica_wrapper/punica_selector.py b/vllm/lora/punica_wrapper/punica_selector.py
new file mode 100644
index 0000000000000..cd64878d95ae3
--- /dev/null
+++ b/vllm/lora/punica_wrapper/punica_selector.py
@@ -0,0 +1,19 @@
+from vllm.platforms import current_platform
+from vllm.utils import print_info_once
+
+from .punica_base import PunicaWrapperBase
+
+
+def get_punica_wrapper(*args, **kwargs) -> PunicaWrapperBase:
+    if current_platform.is_cuda_alike():
+        # Lazy import to avoid ImportError
+        from vllm.lora.punica_wrapper.punica_gpu import PunicaWrapperGPU
+        print_info_once("Using PunicaWrapperGPU.")
+        return PunicaWrapperGPU(*args, **kwargs)
+    elif current_platform.is_hpu():
+        # Lazy import to avoid ImportError
+        from vllm.lora.punica_wrapper.punica_hpu import PunicaWrapperHPU
+        print_info_once("Using PunicaWrapperHPU.")
+        return PunicaWrapperHPU(*args, **kwargs)
+    else:
+        raise NotImplementedError
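
`get_punica_wrapper` keeps the platform dispatch in one place, so a backend for a new platform only needs a `PunicaWrapperBase` subclass plus one more branch here. A sketch of what a hypothetical out-of-tree hookup could look like (`PunicaWrapperCPU` and the `my_plugin` module are illustrative assumptions):

    elif current_platform.is_cpu():
        # Lazy import to avoid ImportError (hypothetical plugin module)
        from my_plugin.punica_cpu import PunicaWrapperCPU
        print_info_once("Using PunicaWrapperCPU.")
        return PunicaWrapperCPU(*args, **kwargs)
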
diff --git a/vllm/lora/punica_wrapper/utils.py b/vllm/lora/punica_wrapper/utils.py
new file mode 100644
index 0000000000000..7360c8c09e3ac
--- /dev/null
+++ b/vllm/lora/punica_wrapper/utils.py
@@ -0,0 +1,159 @@
+from typing import TYPE_CHECKING, List, Optional, Tuple, Union
+
+import torch
+
+if TYPE_CHECKING:
+    # avoid circular import
+    from vllm.lora.layers import LoRAMapping
+    from vllm.lora.models import LongContextLoRAContext
+
+
+def compute_meta(
+    token_lora_tensor: torch.Tensor
+) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int, int, int, bool]:
+    """
+    Get the information required for the sgmv kernel, which has two
+    properties:
+    1. If consecutive requests in the batch use the same LoRA, this function
+    combines them into a single request, improving sgmv kernel inference
+    performance.
+    2. At the beginning of each prefill stage, the metadata is recalculated
+    based on the input, but only once.
+    """
+
+    lora_indices_tensor, seq_length_tensor = torch.unique_consecutive(
+        token_lora_tensor, return_counts=True)
+    cum_result = torch.cumsum(seq_length_tensor, dim=0)
+    b_seq_start_tensor = torch.zeros_like(seq_length_tensor)
+    b_seq_start_tensor[1:].copy_(cum_result[:-1])
+    max_length = seq_length_tensor.max().item()
+    token_nums = seq_length_tensor.sum().item()
+    batch_size = lora_indices_tensor.size(0)
+    no_lora = False
+    # -1 means no LoRA should be applied. Use `no_lora` to determine whether
+    # the current step requires LoRA. If LoRA is not needed, the prefill stage
+    # does not need to launch the Triton kernel, which can improve performance.
+    if batch_size == 1 and lora_indices_tensor == -1:
+        no_lora = True
+    return (b_seq_start_tensor, seq_length_tensor, lora_indices_tensor,
+            batch_size, max_length, token_nums, no_lora)
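+
+
+# A worked example of compute_meta (illustrative): token_lora_tensor =
+# [0, 0, 1, 1, 1, -1] yields lora_indices_tensor = [0, 1, -1],
+# seq_length_tensor = [2, 3, 1], b_seq_start_tensor = [0, 2, 5],
+# batch_size = 3, max_length = 3, token_nums = 6 and no_lora = False.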
+
+
+# TODO: see if this can be vectorized
+def convert_mapping(
+    mapping: "LoRAMapping",
+    lora_index_to_id: List[Optional[int]],
+    max_loras: int,
+    vocab_size: int,
+    extra_vocab_size: int,
+    device: torch.device,
+    long_lora_context: Optional["LongContextLoRAContext"] = None,
+) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor,
+           Optional[torch.Tensor], List[int]]:
+    """Converts LoRAMapping to index tensors.
+
+    Args:
+        mapping: LoRAMapping mapping rows in a batch to LoRA ids.
+        lora_index_to_id: List mapping LoRA ids to LoRA indices.
+        max_loras: Maximum number of LoRAs.
+        vocab_size: Model vocab size.
+        extra_vocab_size: Extra vocab size each LoRA can have.
+        device: Device on which the index tensors are created.
+        long_lora_context: Passed if there are long-context LoRAs in the
+            batch.
+
+    Returns:
+        A tuple of tensors:
+            base_indices: Tensor of shape [batch_size] mapping batch rows to
+                LoRA indices.
+            sampler_indices: Tensor of shape [batch_size] mapping requests to
+                LoRA indices for the sampler. For generation, this will be
+                the same as base_indices. For prefill, this will map requests
+                to LoRA indices.
+            sampler_indices_padded: Tensor of shape [batch_size] mapping
+                requests to LoRA indices for the sampler with padding.
+                Same as sampler_indices, but -1 is replaced with
+                max_loras - 1.
+            embeddings_indices: Tensor of shape [2, batch_size] mapping
+                requests to embedding indices. The first row is for
+                embeddings added by the LoRAs, the second row is for the
+                LoRA.lora_a embeddings.
+            long_lora_indices: Tensor of shape [batch_size] mapping
+                requests to RoPE offsets and rot dims for long LoRAs.
+                None if no long-context LoRA exists.
+            indices_len: List of lengths of the above tensors. It contains
+                (base_indices, sampler_indices, sampler_indices_padded,
+                embeddings_indices, long_lora_indices).
+    """
+    index_mapping_indices: List[int] = list(mapping.index_mapping).copy()
+    embedding_indices = index_mapping_indices.copy()
+    lora_indices = index_mapping_indices.copy()
+    long_lora_offsets: Optional[torch.Tensor] = None
+    if long_lora_context:
+        long_lora_offsets = torch.zeros(len(index_mapping_indices),
+                                        device=device,
+                                        dtype=torch.long)
+    prompt_mapping: List[int] = [
+        lora_index_to_id.index(x) if x > 0 else -1
+        for x in mapping.prompt_mapping
+    ]
+    lora_idx = None
+    for i in range(len(index_mapping_indices)):
+        # TODO: index() can be slow; optimize it.
+        lora_idx = (lora_index_to_id.index(index_mapping_indices[i])
+                    if index_mapping_indices[i] > 0 else -1)
+        embedding_indices[i] = lora_idx if index_mapping_indices[i] > 0 else 0
+        lora_indices[i] = lora_idx
+        if long_lora_context:
+            assert long_lora_offsets is not None
+            lora_offset: int = long_lora_context.offsets_by_lora_id.get(
+                index_mapping_indices[i], 0)
+            long_lora_offsets[i] = lora_offset
+
+    indices_list: List[Union[List[int], torch.Tensor]] = [
+        index_mapping_indices,
+        lora_indices,
+        embedding_indices,
+    ]
+    if long_lora_context:
+        assert long_lora_offsets is not None
+        indices_list.append(long_lora_offsets)
+    indices = torch.tensor(indices_list, dtype=torch.long, device=device)
+    prompt_mapping_tensor = torch.tensor(prompt_mapping,
+                                         dtype=torch.long,
+                                         device=device)
+    embeddings_indices = torch.stack([
+        indices[2] * extra_vocab_size,
+        indices[2] * (vocab_size + extra_vocab_size),
+    ])
+    embeddings_indices[embeddings_indices == -1] = max_loras - 1
+    base_indices = indices[1]
+    sampler_indices = prompt_mapping_tensor
+    sampler_indices_padded = sampler_indices.clone()
+    sampler_indices_padded[sampler_indices_padded == -1] = max_loras - 1
+    sampler_indices_padded = torch.arange(
+        0, len(sampler_indices_padded), device=device, dtype=torch.long) + (
+            sampler_indices_padded * len(sampler_indices_padded))
+    long_lora_indices = None
+    long_lora_indices_len: Optional[int] = None
+    if long_lora_context:
+        long_lora_indices = indices[3]
+        long_lora_indices_len = long_lora_indices.shape[-1]
+    # Contains the length of each indices tensor. Used to index into each
+    # tensor.
+    indices_len = [
+        base_indices.shape[-1],
+        sampler_indices.shape[-1],
+        sampler_indices_padded.shape[-1],
+        embeddings_indices.shape[-1],
+    ]
+    if long_lora_indices_len is not None:
+        indices_len.append(long_lora_indices_len)
+    else:
+        # If long_lora doesn't exist, append None.
+        indices_len.append(None)
+
+    return (
+        base_indices,
+        sampler_indices,
+        sampler_indices_padded,
+        embeddings_indices,
+        long_lora_indices,
+        indices_len,
+    )
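
The guided-decoding changes below add automatic backend fallback. A short usage sketch of the new `maybe_backend_fallback` helper (this assumes `GuidedDecodingParams` accepts these fields as keyword arguments, as its dataclass definition suggests):

    from vllm.model_executor.guided_decoding import maybe_backend_fallback
    from vllm.sampling_params import GuidedDecodingParams

    # 'choice' is unsupported by xgrammar, so this logs a warning and
    # switches the backend to outlines.
    params = GuidedDecodingParams(backend="xgrammar", choice=["yes", "no"])
    assert maybe_backend_fallback(params).backend == "outlines"
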
diff --git a/vllm/model_executor/guided_decoding/__init__.py b/vllm/model_executor/guided_decoding/__init__.py
index d7b67425fcbc0..e631aec928ec5 100644
--- a/vllm/model_executor/guided_decoding/__init__.py
+++ b/vllm/model_executor/guided_decoding/__init__.py
@@ -1,14 +1,96 @@
-from typing import Optional
+from __future__ import annotations
 
-from vllm.logits_process import LogitsProcessor
-from vllm.sampling_params import GuidedDecodingParams
+from typing import TYPE_CHECKING
+
+from vllm.logger import init_logger
+from vllm.platforms import CpuArchEnum, current_platform
+
+if TYPE_CHECKING:
+    from transformers import PreTrainedTokenizer
+
+    from vllm.config import ModelConfig
+    from vllm.logits_process import LogitsProcessor
+    from vllm.sampling_params import GuidedDecodingParams
+
+logger = init_logger(__name__)
+
+
+def has_xgrammar_unsupported_json_features(schema: dict) -> bool:
+    """Check if a JSON schema contains features unsupported by xgrammar."""
+
+    def check_object(obj: dict) -> bool:
+        if not isinstance(obj, dict):
+            return False
+
+        # Check for pattern restrictions
+        if "pattern" in obj:
+            return True
+
+        # Check for numeric ranges
+        if obj.get("type") in ("integer", "number") and any(
+                key in obj for key in [
+                    "minimum", "maximum", "exclusiveMinimum",
+                    "exclusiveMaximum", "multipleOf"
+                ]):
+            return True
+
+        # Recursively check all nested objects and arrays
+        for value in obj.values():
+            if isinstance(value, dict):
+                if check_object(value):
+                    return True
+            elif isinstance(value, list):
+                for item in value:
+                    if isinstance(item, dict) and check_object(item):
+                        return True
+
+        return False
+
+    return check_object(schema)
+
+
+def maybe_backend_fallback(
+        guided_params: GuidedDecodingParams) -> GuidedDecodingParams:
+    # lm-format-enforcer doesn't support grammar; fall back to xgrammar
+    if (guided_params.backend == "lm-format-enforcer"
+            and guided_params.grammar is not None):
+        logger.warning(
+            "lm-format-enforcer does not support grammar guided decoding. "
+            "Falling back to xgrammar instead.")
+        guided_params.backend = "xgrammar"
+
+    if guided_params.backend == "xgrammar":
+        # xgrammar only has x86 wheels for Linux; fall back to outlines
+        if current_platform.get_cpu_architecture() is not CpuArchEnum.X86:
+            logger.warning("xgrammar is only supported on x86 CPUs. "
+                           "Falling back to outlines instead.")
+            guided_params.backend = "outlines"
+
+        # xgrammar doesn't support regex or choice; fall back to outlines
+        if guided_params.regex is not None or guided_params.choice is not None:
+            logger.warning(
+                "xgrammar only supports json or grammar guided decoding. "
+                "Falling back to outlines instead.")
+            guided_params.backend = "outlines"
+
+        # xgrammar doesn't support some JSON schema features
+        elif (guided_params.json is not None
+              and has_xgrammar_unsupported_json_features(guided_params.json)):
+            logger.warning(
+                "xgrammar does not support advanced JSON schema features like "
+                "patterns or numeric ranges. "
+                "Falling back to outlines instead.")
+            guided_params.backend = "outlines"
+
+    return guided_params
 
 
 async def get_guided_decoding_logits_processor(
-        guided_params: GuidedDecodingParams,
-        tokenizer) -> Optional[LogitsProcessor]:
+        guided_params: GuidedDecodingParams, tokenizer: PreTrainedTokenizer,
+        model_config: ModelConfig) -> LogitsProcessor | None:
+    guided_params = maybe_backend_fallback(guided_params)
     # CFG grammar not supported by LMFE, so we use outlines instead
-    if guided_params.backend == 'outlines' or guided_params.grammar:
+    if guided_params.backend == 'outlines':
         # NOTE: lazy import outlines to avoid https://github.com/vllm-project/vllm/issues/4193
         from vllm.model_executor.guided_decoding.outlines_decoding import (  # noqa
             get_outlines_guided_decoding_logits_processor)
@@ -19,17 +101,23 @@ async def get_guided_decoding_logits_processor(
         get_local_lm_format_enforcer_guided_decoding_logits_processor)
     return get_local_lm_format_enforcer_guided_decoding_logits_processor(
         guided_params, tokenizer)
+    if guided_params.backend == 'xgrammar':
+        from vllm.model_executor.guided_decoding.xgrammar_decoding import (  # noqa
+            get_local_xgrammar_guided_decoding_logits_processor)
+        return get_local_xgrammar_guided_decoding_logits_processor(
+            guided_params, tokenizer, model_config)
 
     raise ValueError(
         f"Unknown guided decoding backend '{guided_params.backend}'. "
-        "Must be one of 'outlines, 'lm-format-enforcer'")
+        "Must be one of 'outlines', 'lm-format-enforcer', 'xgrammar'")
 
 
 def get_local_guided_decoding_logits_processor(
-        guided_params: GuidedDecodingParams,
-        tokenizer) -> Optional[LogitsProcessor]:
+        guided_params: GuidedDecodingParams, tokenizer: PreTrainedTokenizer,
+        model_config: ModelConfig) -> LogitsProcessor | None:
+    guided_params = maybe_backend_fallback(guided_params)
     # CFG grammar not supported by LMFE, so we use outlines instead
-    if guided_params.backend == 'outlines' or guided_params.grammar:
+    if guided_params.backend == 'outlines':
         # NOTE: lazy import outlines to avoid https://github.com/vllm-project/vllm/issues/4193
         from vllm.model_executor.guided_decoding.outlines_decoding import (  # noqa
             get_local_outlines_guided_decoding_logits_processor)
@@ -40,7 +128,12 @@ def get_local_guided_decoding_logits_processor(
         get_local_lm_format_enforcer_guided_decoding_logits_processor)
     return get_local_lm_format_enforcer_guided_decoding_logits_processor(
         guided_params, tokenizer)
+    if guided_params.backend == 'xgrammar':
+        from vllm.model_executor.guided_decoding.xgrammar_decoding import (  # noqa
+            get_local_xgrammar_guided_decoding_logits_processor)
+        return get_local_xgrammar_guided_decoding_logits_processor(
+            guided_params, tokenizer, model_config)
 
     raise ValueError(
         f"Unknown guided decoding backend '{guided_params.backend}'. "
-        "Must be one of 'outlines, 'lm-format-enforcer'")
+        "Must be one of 'outlines', 'lm-format-enforcer', 'xgrammar'")
diff --git a/vllm/model_executor/guided_decoding/outlines_decoding.py b/vllm/model_executor/guided_decoding/outlines_decoding.py
index 8a7ff38bfeb1a..eb8db882435e6 100644
--- a/vllm/model_executor/guided_decoding/outlines_decoding.py
+++ b/vllm/model_executor/guided_decoding/outlines_decoding.py
@@ -1,5 +1,6 @@
 import asyncio
 import concurrent.futures
+import os
 from enum import Enum
 from json import dumps as json_dumps
 from re import escape as regex_escape
@@ -48,6 +49,11 @@ class GuidedDecodingMode(Enum):
 
 global_thread_pool = None  # used for generating logits processor fsm
 
+# It's not yet clear that using more workers provides a benefit, and it could
+# potentially starve other processes on the machine. We'll cap this for now
+# and adjust later if testing proves it helps overcome a bottleneck.
+_MAX_THREADPOOL_WORKERS = 16 + async def get_outlines_guided_decoding_logits_processor( guided_params: GuidedDecodingParams, tokenizer: PreTrainedTokenizerBase @@ -65,8 +71,11 @@ async def get_outlines_guided_decoding_logits_processor( return None if global_thread_pool is None: + max_workers = os.cpu_count() or 2 + if max_workers > _MAX_THREADPOOL_WORKERS: + max_workers = _MAX_THREADPOOL_WORKERS global_thread_pool = concurrent.futures.ThreadPoolExecutor( - max_workers=2) + max_workers=max_workers) loop = asyncio.get_running_loop() return await loop.run_in_executor(global_thread_pool, diff --git a/vllm/model_executor/guided_decoding/outlines_logits_processors.py b/vllm/model_executor/guided_decoding/outlines_logits_processors.py index e1309c31f77e7..b63fed1c8a8c3 100644 --- a/vllm/model_executor/guided_decoding/outlines_logits_processors.py +++ b/vllm/model_executor/guided_decoding/outlines_logits_processors.py @@ -25,7 +25,7 @@ from outlines import grammars from outlines.caching import cache from outlines.fsm.guide import CFGGuide, Generate, Guide, RegexGuide, Write -from outlines.fsm.json_schema import build_regex_from_schema +from outlines_core.fsm.json_schema import build_regex_from_schema from pydantic import BaseModel from transformers import PreTrainedTokenizerBase @@ -99,7 +99,7 @@ class RegexLogitsProcessor(BaseLogitsProcessor): def _get_guide(cls, regex_string: str, tokenizer: PreTrainedTokenizerBase) -> Guide: tokenizer = _adapt_tokenizer(tokenizer) - return RegexGuide(regex_string, tokenizer) + return RegexGuide.from_regex(regex_string, tokenizer) def __init__(self, regex_string: str, tokenizer: PreTrainedTokenizerBase): """Compile the FSM that drives the regex-structured generation. diff --git a/vllm/model_executor/guided_decoding/xgrammar_decoding.py b/vllm/model_executor/guided_decoding/xgrammar_decoding.py new file mode 100644 index 0000000000000..fc45e37cf6f06 --- /dev/null +++ b/vllm/model_executor/guided_decoding/xgrammar_decoding.py @@ -0,0 +1,278 @@ +# noqa: UP007 +from __future__ import annotations + +import json +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Any, NamedTuple + +import torch +from transformers import PreTrainedTokenizerFast + +try: + import xgrammar as xgr + from xgrammar.base import _core as xgr_core +except ImportError: + pass + +from vllm.model_executor.guided_decoding.xgrammar_utils import ( + convert_lark_to_gbnf, grammar_is_likely_lark) + +if TYPE_CHECKING: + from transformers import PreTrainedTokenizer + + from vllm.config import ModelConfig + from vllm.sampling_params import GuidedDecodingParams + + +# TODO: passing batch size to max threads here +def get_local_xgrammar_guided_decoding_logits_processor( + guided_params: GuidedDecodingParams, + tokenizer: PreTrainedTokenizer, + model_config: ModelConfig, + max_threads: int = 8): + config = GrammarConfig.from_guided_params(guided_params=guided_params, + model_config=model_config, + tokenizer=tokenizer, + max_threads=max_threads) + return XGrammarLogitsProcessor(config) + + +class TokenizerData(NamedTuple): + """Immutable container for cached tokenizer data.""" + encoded_vocab: list[str] + stop_token_ids: list[int] | None + backend_str: str + + +class TokenizerDataCache: + """Cache manager for tokenizer data to avoid repeated processing.""" + _cache: dict[int, TokenizerData] = {} + + @classmethod + def get_tokenizer_data(cls, + tokenizer: PreTrainedTokenizer) -> TokenizerData: + tokenizer_hash = hash(tokenizer) + + if tokenizer_hash not in cls._cache: + # Vendored 
from xgrammar logic since we cannot pickle the tokenizer + # https://github.com/mlc-ai/xgrammar/blob/d77c0a0173ef14779c918e3be7966ba852f7910f/python/xgrammar/tokenizer_info.py#L98 # noqa: E501 + try: + encoded_vocab = [ + token for token, _ in sorted(tokenizer.get_vocab().items(), + key=lambda x: x[1]) + ] + except AttributeError as e: + raise ValueError( + f"Cannot get the vocabulary of the tokenizer " + f"{type(tokenizer)}. The tokenizer should have a " + "get_vocab method.") from e + + stop_token_ids = None + backend_str = xgr.VocabType.RAW + if isinstance(tokenizer, PreTrainedTokenizerFast): + backend_str = tokenizer.backend_tokenizer.to_str() + if stop_token_ids is None and hasattr( + tokenizer, + "eos_token_id") and tokenizer.eos_token_id is not None: + stop_token_ids = [tokenizer.eos_token_id] + + cls._cache[tokenizer_hash] = TokenizerData( + encoded_vocab=encoded_vocab, + stop_token_ids=stop_token_ids, + backend_str=backend_str) + + return cls._cache[tokenizer_hash] + + +class GrammarCompilerCache: + """ + Cache for GrammarCompiler instances based on tokenizer. + + This cache reduces the overhead of creating new compiler instances when + using the same tokenizer configuration. + """ + _cache: dict[str, xgr.GrammarCompiler] = {} + + @classmethod + def get_compiler(cls, config: GrammarConfig) -> xgr.GrammarCompiler: + cache_key = str(config.tokenizer_hash) + + if cache_key not in cls._cache: + assert config.encoded_vocab is not None + tokenizer_info = xgr.TokenizerInfo._create_from_handle( + xgr_core.TokenizerInfo.from_huggingface( + config.encoded_vocab, config.backend_str, + config.vocab_size, config.stop_token_ids)) + cls._cache[cache_key] = xgr.GrammarCompiler( + tokenizer_info, max_threads=config.max_threads) + + return cls._cache[cache_key] + + +@dataclass +class GrammarConfig: + """Serializable configuration for grammar compilation""" + tokenizer_hash: int + vocab_size: int + json_str: str | None = None + grammar_str: str | None = None + json_object: bool | None = None + max_threads: int = 8 + # Only populated if tokenizer_hash not in cache + encoded_vocab: list[str] | None = None + stop_token_ids: list[int] | None = None + backend_str: str | None = None + + @classmethod + def from_guided_params(cls, + guided_params: GuidedDecodingParams, + model_config: ModelConfig, + tokenizer: PreTrainedTokenizer, + max_threads: int = 8) -> GrammarConfig: + + tokenizer_hash = hash(tokenizer) + tokenizer_data = TokenizerDataCache.get_tokenizer_data(tokenizer) + encoded_vocab = tokenizer_data.encoded_vocab + stop_token_ids = tokenizer_data.stop_token_ids + backend_str = tokenizer_data.backend_str + + if guided_params.json: + if not isinstance(guided_params.json, str): + json_str = json.dumps(guided_params.json) + else: + json_str = guided_params.json + + # Validate the schema and raise ValueError here if it is invalid. + # This is to avoid exceptions in model execution, which will crash + # the engine worker process. 
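+            # (xgrammar reports an invalid schema as a RuntimeError;
+            # converting it to ValueError lets the request be rejected
+            # up front instead of failing inside the worker.)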
+            try:
+                xgr.Grammar.from_json_schema(json_str)
+            except RuntimeError as err:
+                raise ValueError(str(err)) from err
+
+            return cls(json_str=json_str,
+                       vocab_size=model_config.hf_text_config.vocab_size,
+                       encoded_vocab=encoded_vocab,
+                       stop_token_ids=stop_token_ids,
+                       backend_str=backend_str,
+                       tokenizer_hash=tokenizer_hash,
+                       max_threads=max_threads)
+        elif guided_params.grammar:
+            # XGrammar only supports GBNF grammars, so we must convert Lark
+            if grammar_is_likely_lark(guided_params.grammar):
+                try:
+                    grammar_str = convert_lark_to_gbnf(guided_params.grammar)
+                except ValueError as e:
+                    raise ValueError(
+                        "Failed to convert the grammar from Lark to GBNF. "
+                        "Please either use GBNF grammar directly or specify"
+                        " --guided-decoding-backend=outlines.\n"
+                        f"Conversion error: {str(e)}") from e
+            else:
+                grammar_str = guided_params.grammar
+
+            # Validate the grammar and raise ValueError here if it is invalid.
+            # This is to avoid exceptions in model execution, which will crash
+            # the engine worker process.
+            try:
+                xgr.Grammar.from_ebnf(grammar_str)
+            except RuntimeError as err:
+                raise ValueError(str(err)) from err
+
+            return cls(grammar_str=grammar_str,
+                       vocab_size=model_config.hf_text_config.vocab_size,
+                       encoded_vocab=encoded_vocab,
+                       stop_token_ids=stop_token_ids,
+                       backend_str=backend_str,
+                       tokenizer_hash=tokenizer_hash,
+                       max_threads=max_threads)
+        elif guided_params.json_object:
+            return cls(json_object=True,
+                       vocab_size=model_config.hf_text_config.vocab_size,
+                       encoded_vocab=encoded_vocab,
+                       stop_token_ids=stop_token_ids,
+                       backend_str=backend_str,
+                       tokenizer_hash=tokenizer_hash,
+                       max_threads=max_threads)
+        else:
+            raise ValueError(
+                "Only JSON and EBNF grammar modes are currently supported "
+                "for xgrammar")
+
+
+@dataclass
+class XGrammarLogitsProcessor:
+    """Wrapper class to support the pickle protocol"""
+    config: GrammarConfig
+
+    ctx: xgr.CompiledGrammar | None = None
+    token_bitmask: torch.Tensor = None  # type: ignore[assignment]
+    matchers: list[xgr.GrammarMatcher] = field(default_factory=list)
+    batch_size: int = field(default=1)
+    prefilled: bool = field(default=False)
+
+    def __getstate__(self) -> dict[str, Any]:
+        return {'config': self.config}
+
+    def __setstate__(self, state: dict[str, Any]):
+        self.config = state['config']
+
+        self.ctx = None
+        self.matchers = []
+        self.batch_size = 1
+        self.token_bitmask = None  # type: ignore[assignment]
+        self.prefilled = False
+
+    def _ensure_ctx(self):
+        """Lazily initialize the processor in the worker process"""
+        if self.ctx is None:
+            compiler = GrammarCompilerCache.get_compiler(self.config)
+            if self.config.json_str is not None:
+                self.ctx = compiler.compile_json_schema(self.config.json_str)
+            elif self.config.grammar_str is not None:
+                self.ctx = compiler.compile_grammar(self.config.grammar_str)
+            elif self.config.json_object:
+                self.ctx = compiler.compile_builtin_json_grammar()
+            else:
+                raise ValueError(
+                    "Invalid configuration for xgrammar logits processor")
+
+    def __call__(self, input_ids: list[int],
+                 scores: torch.Tensor) -> torch.Tensor:
+        if self.ctx is None:
+            self._ensure_ctx()
+
+        if len(self.matchers) == 0:
+            self.matchers = [
+                xgr.GrammarMatcher(self.ctx) for _ in range(self.batch_size)
+            ]
+            self.token_bitmask = xgr.allocate_token_bitmask(
+                self.batch_size, self.config.vocab_size)
+
+        if not self.prefilled:
+            # Have not sampled a token yet
+            self.prefilled = True
+        else:
+            for i, matcher in enumerate(self.matchers):
+                if not matcher.is_terminated():
+                    sampled_token = input_ids[-1]
+                    assert self.matchers[i].accept_token(sampled_token)
+
+        for i, matcher in enumerate(self.matchers):
+            if not matcher.is_terminated():
+                # @ubospica: ideally, fill_next_token_bitmask should be
+                # parallelized with model decoding
+                # See https://github.com/vllm-project/vllm/pull/10785/files#r1864278303
+                matcher.fill_next_token_bitmask(self.token_bitmask, i)
+
+        # token_bitmask is a CPU tensor used with accept_token and
+        # fill_next_token_bitmask, so on non-CUDA devices the scores are
+        # moved to the CPU to apply the bitmask and moved back afterwards.
+        device_type = scores.device.type
+        if device_type != "cuda":
+            scores = scores.to("cpu")
+        xgr.apply_token_bitmask_inplace(scores,
+                                        self.token_bitmask.to(scores.device))
+        if device_type != "cuda":
+            scores = scores.to(device_type)
+
+        return scores
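
The processor is shipped to worker processes by pickling, so only the serializable GrammarConfig crosses the process boundary; the compiled grammar, matchers, and bitmask are rebuilt lazily on first use. A sketch of the round-trip this enables (assuming a `config` built by GrammarConfig.from_guided_params):

    import pickle

    proc = XGrammarLogitsProcessor(config)
    clone = pickle.loads(pickle.dumps(proc))   # only 'config' survives
    assert clone.ctx is None and clone.matchers == []   # rebuilt in __call__
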
diff --git a/vllm/model_executor/guided_decoding/xgrammar_utils.py b/vllm/model_executor/guided_decoding/xgrammar_utils.py
new file mode 100644
index 0000000000000..9a0463964de49
--- /dev/null
+++ b/vllm/model_executor/guided_decoding/xgrammar_utils.py
@@ -0,0 +1,158 @@
+import re
+
+
+def grammar_is_likely_lark(grammar_str: str) -> bool:
+    """
+    Check if grammar appears to use Lark syntax.
+
+    Args:
+        grammar_str: Input grammar string
+
+    Returns:
+        bool: True if grammar appears to be in Lark format, False otherwise
+
+    Examples:
+        >>> grammar_is_likely_lark("rule: 'abc'")
+        True
+        >>> grammar_is_likely_lark("rule ::= 'abc'")
+        False
+    """
+    if not grammar_str or not isinstance(grammar_str, str):
+        return False
+
+    for line in grammar_str.split('\n'):
+        # Remove both comment styles
+        line = re.sub(r'(#|//).*$', '', line).strip()
+        if not line:
+            continue
+
+        # Look for a GBNF rule definition
+        if '::=' in line:
+            return False
+
+    return True
+
+
+def convert_lark_to_gbnf(grammar_str: str) -> str:
+    """
+    Convert a Lark grammar string to GBNF format.
+ + GBNF reference: + https://github.com/ggerganov/llama.cpp/blob/master/grammars/README.md + Lark grammar reference: + https://lark-parser.readthedocs.io/en/latest/grammar.html + + Args: + grammar_str: Input grammar in Lark format + + Returns: + str: Converted grammar in GBNF format + + Examples: + >>> print(convert_lark_to_gbnf("rule: 'hello'")) + root ::= rule + rule ::= "hello" + """ + if not isinstance(grammar_str, str): + raise ValueError(f"Grammar must be a string, got {type(grammar_str)}") + if not grammar_str.strip(): + raise ValueError("Grammar string cannot be empty") + + defined_rules = set() + referenced_rules = set() + output_lines = [] + + def clean_line(line: str) -> str: + """Remove comments and whitespace from line.""" + return re.sub(r'(#|//).*$', '', line).strip() + + def check_quotes(text: str, rule_name: str, line_num: int) -> None: + """Validate quote matching in text.""" + if text.count("'") % 2 != 0 or text.count('"') % 2 != 0: + raise ValueError( + f"Mismatched quotes in {rule_name} on line {line_num}") + + def extract_references(text: str) -> set: + """Extract rule references from text.""" + # Remove quoted strings and special characters + text = re.sub(r'"[^"]*"', '', text) + text = re.sub(r'[+*?()|\[\]{}]', ' ', text) + return set(re.findall(r'\b[a-zA-Z_][a-zA-Z0-9_]*\b', text)) + + # First pass: Find root rule and validate rule definitions + lines = [clean_line(line) for line in grammar_str.split('\n')] + first_rule = None + + for line_num, line in enumerate(lines, 1): + if not line or line.startswith('|'): + continue + + if ':' in line: + try: + name = line.split(':', 1)[0].strip().strip('?') + defined_rules.add(name) + if first_rule is None: + first_rule = name + if name == 'start': + first_rule = 'start' + except IndexError as e: + raise ValueError(f"Invalid rule format on line {line_num}. 
" + "Expected 'rule_name: definition'") from e + + if not defined_rules: + raise ValueError("No valid rules found in grammar") + + # Add root rule + output_lines.append(f"root ::= {first_rule}") + + # Second pass: Process rule definitions and alternatives + current_rule = None + current_definition = [] + + for line_num, line in enumerate(lines, 1): + if not line: + continue + + try: + if ':' in line and not line.startswith('|'): + # Save previous rule if exists + if current_rule: + output_lines.append( + f"{current_rule} ::= {' | '.join(current_definition)}") + + # Process new rule + name, definition = line.split(':', 1) + current_rule = name.strip().strip('?') + + check_quotes(definition, f"rule '{current_rule}'", line_num) + definition = re.sub(r"'([^']*)'", r'"\1"', definition) + referenced_rules.update(extract_references(definition)) + current_definition = [definition.strip()] + + elif line.startswith('|'): + if not current_rule: + raise ValueError(f"Alternative '|' on line {line_num} " + "without a preceding rule definition") + + alt_def = line[1:].strip() + check_quotes(alt_def, f"alternative for rule '{current_rule}'", + line_num) + alt_def = re.sub(r"'([^']*)'", r'"\1"', alt_def) + referenced_rules.update(extract_references(alt_def)) + current_definition.append(alt_def) + + except ValueError as e: + raise ValueError(f"Error on line {line_num}: {str(e)}") from e + + # Add final rule if exists + if current_rule: + output_lines.append( + f"{current_rule} ::= {' | '.join(current_definition)}") + + # Validate all rules are defined + undefined_rules = referenced_rules - defined_rules - {'root'} + if undefined_rules: + raise ValueError("Referenced rules are not defined: " + f"{', '.join(sorted(undefined_rules))}") + + return '\n'.join(output_lines) diff --git a/vllm/model_executor/layers/fused_moe/layer.py b/vllm/model_executor/layers/fused_moe/layer.py index 5570771ac917b..8c6f7c6e06515 100644 --- a/vllm/model_executor/layers/fused_moe/layer.py +++ b/vllm/model_executor/layers/fused_moe/layer.py @@ -242,7 +242,7 @@ def _load_per_tensor_weight_scale(self, shard_id: str, def _load_model_weight_or_group_weight_scale(self, shard_dim: int, expert_data: torch.Tensor, shard_id: str, - loaded_weight: torch.tensor, + loaded_weight: torch.Tensor, tp_rank: int): # Load grouped weight scales for group quantization # or model weights @@ -261,7 +261,7 @@ def _load_model_weight_or_group_weight_scale(self, shard_dim: int, def _load_per_channel_weight_scale(self, expert_data: torch.Tensor, shard_dim: int, shard_id: str, - loaded_weight: torch.tensor, + loaded_weight: torch.Tensor, tp_rank: int): # for per channel weight quantization if shard_id == "w2": @@ -274,7 +274,7 @@ def _load_per_channel_weight_scale(self, expert_data: torch.Tensor, tp_rank=tp_rank) def _load_w13(self, expert_data: torch.Tensor, shard_dim: int, - shard_id: str, loaded_weight: torch.tensor, tp_rank: int): + shard_id: str, loaded_weight: torch.Tensor, tp_rank: int): # Index the loaded weight for tp sharding. # gate_up_proj: "MergedColumnParallel", so tp sharding on output_dim @@ -292,7 +292,7 @@ def _load_w13(self, expert_data: torch.Tensor, shard_dim: int, expert_data.copy_(loaded_weight) def _load_w2(self, expert_data: torch.Tensor, shard_dim: int, - shard_id: str, loaded_weight: torch.tensor, tp_rank: int): + shard_id: str, loaded_weight: torch.Tensor, tp_rank: int): # Index the loaded weight for tp sharding. 
# down_proj: "RowParallel" so tp sharding on input_dim @@ -311,7 +311,7 @@ def _load_single_value(self, param: torch.nn.Parameter, param_data[expert_id] = loaded_weight def _load_g_idx(self, shard_id: str, expert_data: torch.Tensor, - shard_dim: int, loaded_weight: torch.tensor, tp_rank: int): + shard_dim: int, loaded_weight: torch.Tensor, tp_rank: int): if shard_id == "w2": self._load_w2(shard_id=shard_id, diff --git a/vllm/model_executor/layers/layernorm.py b/vllm/model_executor/layers/layernorm.py index 345919c5d1636..43ea4eb5a4d1a 100644 --- a/vllm/model_executor/layers/layernorm.py +++ b/vllm/model_executor/layers/layernorm.py @@ -20,6 +20,7 @@ def __init__( hidden_size: int, eps: float = 1e-6, var_hidden_size: Optional[int] = None, + has_weight: bool = True, ) -> None: super().__init__() @@ -27,7 +28,11 @@ def __init__( self.variance_epsilon = eps self.variance_size_override = (None if var_hidden_size == hidden_size else var_hidden_size) - self.weight = nn.Parameter(torch.ones(hidden_size)) + self.has_weight = has_weight + + self.weight = torch.ones(hidden_size) + if self.has_weight: + self.weight = nn.Parameter(self.weight) def forward_native( self, @@ -59,7 +64,9 @@ def forward_native( variance = x_var.pow(2).mean(dim=-1, keepdim=True) x = x * torch.rsqrt(variance + self.variance_epsilon) - x = x.to(orig_dtype) * self.weight + x = x.to(orig_dtype) + if self.has_weight: + x = x * self.weight if residual is None: return x else: diff --git a/vllm/model_executor/layers/linear.py b/vllm/model_executor/layers/linear.py index 46ef11e7d02c6..678a1171e745d 100644 --- a/vllm/model_executor/layers/linear.py +++ b/vllm/model_executor/layers/linear.py @@ -23,6 +23,16 @@ logger = init_logger(__name__) + +from torch.distributed.tensor import ( + DeviceMesh, + distribute_module, + distribute_tensor, + DTensor, + Replicate, + Shard, +) + WEIGHT_LOADER_V2_SUPPORTED = [ "CompressedTensorsLinearMethod", "AWQMarlinLinearMethod", "AWQLinearMethod", "GPTQMarlinLinearMethod", "Fp8LinearMethod", @@ -274,8 +284,9 @@ class ColumnParallelLinear(LinearBase): """ def __init__(self, - input_size: int, - output_size: int, + *, + input_size: int = None, + output_size: int=None, bias: bool = True, gather_output: bool = False, skip_bias_add: bool = False, @@ -283,6 +294,10 @@ def __init__(self, quant_config: Optional[QuantizationConfig] = None, output_sizes: Optional[List[int]] = None, prefix: str = ""): + + self.input_size = (input_size or Replicate(),) + self.output_size = (output_size or Shard(-1),) + super().__init__(input_size, output_size, skip_bias_add, params_dtype, quant_config, prefix) @@ -987,6 +1002,7 @@ class RowParallelLinear(LinearBase): """ def __init__(self, + *, input_size: int, output_size: int, bias: bool = True, @@ -996,6 +1012,10 @@ def __init__(self, reduce_results: bool = True, quant_config: Optional[QuantizationConfig] = None, prefix: str = ""): + + self.input_size = (input_size or Replicate(),) + self.output_size = (output_size or Shard(-1),) + super().__init__(input_size, output_size, skip_bias_add, params_dtype, quant_config, prefix) diff --git a/vllm/model_executor/layers/logits_processor.py b/vllm/model_executor/layers/logits_processor.py index fb76b1b17925e..2bc7e458494f7 100644 --- a/vllm/model_executor/layers/logits_processor.py +++ b/vllm/model_executor/layers/logits_processor.py @@ -5,6 +5,7 @@ import torch import torch.nn as nn +import vllm.envs as envs from vllm.distributed import (tensor_model_parallel_all_gather, tensor_model_parallel_gather) from 
vllm.model_executor.layers.vocab_parallel_embedding import ( @@ -42,7 +43,9 @@ def __init__(self, # Soft cap the logits. Used in Gemma 2. self.soft_cap = soft_cap # Whether to use gather or all-gather to gather the logits. - self.use_gather = not current_platform.is_tpu() + + self.use_gather = not current_platform.is_tpu( + ) and not envs.VLLM_USE_V1 def forward( self, diff --git a/vllm/model_executor/layers/mamba/mamba_mixer.py b/vllm/model_executor/layers/mamba/mamba_mixer.py index 8ef0a6cdf2c52..10bec75f49fdf 100644 --- a/vllm/model_executor/layers/mamba/mamba_mixer.py +++ b/vllm/model_executor/layers/mamba/mamba_mixer.py @@ -40,6 +40,7 @@ def __init__(self, use_conv_bias: bool, use_bias: bool, use_rms_norm: bool, + rms_norm_has_weight: bool = True, rms_norm_eps: float = 1e-5, activation="silu"): super().__init__() @@ -105,14 +106,23 @@ def A_weight_loader(param: Parameter, loaded_weight: torch.Tensor): input_is_parallel=True, ) - self.dt_layernorm = RMSNorm(time_step_rank, - eps=rms_norm_eps) if use_rms_norm else None - - self.b_layernorm = RMSNorm(ssm_state_size, - eps=rms_norm_eps) if use_rms_norm else None - - self.c_layernorm = RMSNorm(ssm_state_size, - eps=rms_norm_eps) if use_rms_norm else None + self.dt_layernorm = RMSNorm( + time_step_rank, + eps=rms_norm_eps, + has_weight=rms_norm_has_weight, + ) if use_rms_norm else None + + self.b_layernorm = RMSNorm( + ssm_state_size, + eps=rms_norm_eps, + has_weight=rms_norm_has_weight, + ) if use_rms_norm else None + + self.c_layernorm = RMSNorm( + ssm_state_size, + eps=rms_norm_eps, + has_weight=rms_norm_has_weight, + ) if use_rms_norm else None def forward_native(self, hidden_states: torch.Tensor, attn_metadata: AttentionMetadata, diff --git a/vllm/model_executor/layers/pooler.py b/vllm/model_executor/layers/pooler.py index f9437b4112ceb..75bf33dc70a51 100644 --- a/vllm/model_executor/layers/pooler.py +++ b/vllm/model_executor/layers/pooler.py @@ -1,14 +1,16 @@ from enum import IntEnum -from typing import List, Optional +from typing import List, Optional, Union import torch import torch.nn as nn +import torch.nn.functional as F from transformers import PretrainedConfig +from typing_extensions import assert_never from vllm.config import PoolerConfig from vllm.model_executor.pooling_metadata import (PoolingMetadata, PoolingTensors) -from vllm.sequence import EmbeddingSequenceGroupOutput, PoolerOutput +from vllm.sequence import PoolerOutput, PoolingSequenceGroupOutput from vllm.transformers_utils.config import ( get_cross_encoder_activation_function) @@ -22,7 +24,7 @@ class PoolingType(IntEnum): MEAN = 4 -class Pooler(nn.Module): +class SimplePooler(nn.Module): """A layer that pools specific information from hidden states. This layer does the following: @@ -35,21 +37,203 @@ class Pooler(nn.Module): normalize: Whether to normalize the pooled data. 
""" + @staticmethod + def from_pooling_type( + pooling_type: PoolingType, + *, + normalize: bool, + softmax: bool, + step_tag_id: Optional[int] = None, + returned_token_ids: Optional[List[int]] = None, + ) -> "SimplePooler": + if pooling_type == PoolingType.LAST: + assert step_tag_id is None and returned_token_ids is None + return LastPool(normalize=normalize, softmax=softmax) + if pooling_type == PoolingType.ALL: + assert step_tag_id is None and returned_token_ids is None + return AllPool(normalize=normalize, softmax=softmax) + if pooling_type == PoolingType.CLS: + assert step_tag_id is None and returned_token_ids is None + return CLSPool(normalize=normalize, softmax=softmax) + if pooling_type == PoolingType.MEAN: + assert step_tag_id is None and returned_token_ids is None + return MeanPool(normalize=normalize, softmax=softmax) + if pooling_type == PoolingType.STEP: + return StepPool(normalize=normalize, + softmax=softmax, + step_tag_id=step_tag_id, + returned_token_ids=returned_token_ids) + + assert_never(pooling_type) + + def __init__(self, *, normalize: bool, softmax: bool) -> None: + super().__init__() + + self.head = PoolerHead(normalize=normalize, softmax=softmax) + + def get_prompt_lens( + self, + hidden_states: torch.Tensor, + pooling_metadata: PoolingMetadata, + ) -> torch.Tensor: + return PoolingTensors.from_pooling_metadata( + pooling_metadata, hidden_states.device).prompt_lens + + def extract_states( + self, + hidden_states: torch.Tensor, + pooling_metadata: PoolingMetadata, + ) -> Union[list[torch.Tensor], torch.Tensor]: + raise NotImplementedError + + def build_output(self, data: torch.Tensor) -> PoolingSequenceGroupOutput: + return PoolingSequenceGroupOutput(data) + + def forward( + self, + hidden_states: torch.Tensor, + pooling_metadata: PoolingMetadata, + ) -> PoolerOutput: + pooled_data = self.extract_states(hidden_states, pooling_metadata) + pooled_data = self.head(pooled_data) + pooled_outputs = [self.build_output(data) for data in pooled_data] + return PoolerOutput(outputs=pooled_outputs) + + +class CLSPool(SimplePooler): + + def extract_states( + self, + hidden_states: torch.Tensor, + pooling_metadata: PoolingMetadata, + ) -> Union[list[torch.Tensor], torch.Tensor]: + prompt_lens = self.get_prompt_lens(hidden_states, pooling_metadata) + + first_token_flat_indices = torch.zeros_like(prompt_lens) + first_token_flat_indices[1:] += torch.cumsum(prompt_lens, dim=0)[:-1] + return hidden_states[first_token_flat_indices] + + +class LastPool(SimplePooler): + + def extract_states( + self, + hidden_states: torch.Tensor, + pooling_metadata: PoolingMetadata, + ) -> Union[list[torch.Tensor], torch.Tensor]: + prompt_lens = self.get_prompt_lens(hidden_states, pooling_metadata) + + last_token_flat_indices = torch.cumsum(prompt_lens, dim=0) - 1 + return hidden_states[last_token_flat_indices] + + +class AllPool(SimplePooler): + + def extract_states( + self, + hidden_states: torch.Tensor, + pooling_metadata: PoolingMetadata, + ) -> Union[list[torch.Tensor], torch.Tensor]: + prompt_lens = self.get_prompt_lens(hidden_states, pooling_metadata) + + offset = 0 + pooled_data = list[torch.Tensor]() + for prompt_len in prompt_lens: + pooled_data.append(hidden_states[offset:offset + prompt_len]) + offset += prompt_len + + return pooled_data + + +class MeanPool(SimplePooler): + + def extract_states( + self, + hidden_states: torch.Tensor, + pooling_metadata: PoolingMetadata, + ) -> Union[list[torch.Tensor], torch.Tensor]: + prompt_lens = self.get_prompt_lens(hidden_states, pooling_metadata) + + 
cumsum = torch.cumsum(hidden_states, dim=0) + start_indices = torch.cat([ + torch.tensor([0], device=hidden_states.device), + torch.cumsum(prompt_lens[:-1], dim=0) + ]) + end_indices = torch.cumsum(prompt_lens, dim=0) + return (cumsum[end_indices - 1] - cumsum[start_indices] + + hidden_states[start_indices]) / prompt_lens.unsqueeze(1) + + +class StepPool(SimplePooler): + def __init__( self, - pooling_type: PoolingType, + *, normalize: bool, softmax: bool, step_tag_id: Optional[int] = None, returned_token_ids: Optional[List[int]] = None, ): + super().__init__(normalize=normalize, softmax=softmax) + + self.step_tag_id = step_tag_id + self.returned_token_ids = returned_token_ids + + def extract_states( + self, + hidden_states: torch.Tensor, + pooling_metadata: PoolingMetadata, + ) -> Union[list[torch.Tensor], torch.Tensor]: + prompt_lens = self.get_prompt_lens(hidden_states, pooling_metadata) + + returned_token_ids = self.returned_token_ids + if returned_token_ids is not None and len(returned_token_ids) > 0: + hidden_states = hidden_states[:, returned_token_ids] + + step_tag_id = self.step_tag_id + + offset = 0 + pooled_data = list[torch.Tensor]() + for prompt_len, seq_data_i in zip(prompt_lens, + pooling_metadata.seq_data.values()): + pooled_data_i = hidden_states[offset:offset + prompt_len] + if step_tag_id is not None: + token_ids = torch.tensor(seq_data_i.prompt_token_ids) + pooled_data_i = pooled_data_i[token_ids == step_tag_id] + + offset += prompt_len + pooled_data.append(pooled_data_i) + + return pooled_data + + +class PoolerHead(nn.Module): + + def __init__(self, *, normalize: bool, softmax: bool) -> None: super().__init__() - self.pooling_type = pooling_type self.normalize = normalize self.softmax = softmax - self.step_tag_id = step_tag_id - self.returned_token_ids = returned_token_ids + + def forward(self, pooled_data: Union[list[torch.Tensor], torch.Tensor]): + if self.normalize: + if isinstance(pooled_data, list): + pooled_data = [ + F.normalize(data, p=2, dim=1) for data in pooled_data + ] + else: + pooled_data = F.normalize(pooled_data, p=2, dim=1) + + if self.softmax: + if isinstance(pooled_data, list): + pooled_data = [F.softmax(data, dim=-1) for data in pooled_data] + else: + pooled_data = F.softmax(pooled_data, dim=-1) + + return pooled_data + + +class Pooler(nn.Module): @classmethod def from_config_with_defaults( @@ -60,10 +244,8 @@ def from_config_with_defaults( softmax: bool, step_tag_id: Optional[int] = None, returned_token_ids: Optional[List[int]] = None, - ) -> Optional["Pooler"]: - if pooler_config is None: - return None - return cls( + ) -> SimplePooler: + return SimplePooler.from_pooling_type( pooling_type=PoolingType[pooler_config.pooling_type] if pooler_config.pooling_type is not None else pooling_type, normalize=pooler_config.normalize @@ -77,85 +259,6 @@ def from_config_with_defaults( returned_token_ids, ) - def forward( - self, - hidden_states: torch.Tensor, - pooling_metadata: PoolingMetadata, - ) -> PoolerOutput: - """Pools specific information from hidden states based on metadata.""" - - prompt_lens = PoolingTensors.from_pooling_metadata( - pooling_metadata, hidden_states.device).prompt_lens - - if self.pooling_type is PoolingType.CLS: - first_token_flat_indices = torch.zeros_like(prompt_lens) - first_token_flat_indices[1:] += torch.cumsum(prompt_lens, - dim=0)[:-1] - pooled_data = hidden_states[first_token_flat_indices] - elif self.pooling_type == PoolingType.LAST: - last_token_flat_indices = torch.cumsum(prompt_lens, dim=0) - 1 - pooled_data = 
hidden_states[last_token_flat_indices] - elif self.pooling_type == PoolingType.ALL: - offset = 0 - pooled_data = [] - for prompt_len in prompt_lens: - pooled_data.append(hidden_states[offset:offset + prompt_len]) - offset += prompt_len - elif self.pooling_type == PoolingType.MEAN: - # Calculate mean pooling - cumsum = torch.cumsum(hidden_states, dim=0) - start_indices = torch.cat([ - torch.tensor([0], device=hidden_states.device), - torch.cumsum(prompt_lens[:-1], dim=0) - ]) - end_indices = torch.cumsum(prompt_lens, dim=0) - pooled_data = ( - cumsum[end_indices - 1] - cumsum[start_indices] + - hidden_states[start_indices]) / prompt_lens.unsqueeze(1) - elif self.pooling_type == PoolingType.STEP: - returned_token_ids = self.returned_token_ids - if returned_token_ids is not None and len(returned_token_ids) > 0: - hidden_states = hidden_states[:, returned_token_ids] - - step_tag_id = self.step_tag_id - - offset = 0 - pooled_data = [] - for prompt_len, seq_data_i in zip( - prompt_lens, pooling_metadata.seq_data.values()): - pooled_data_i = hidden_states[offset:offset + prompt_len] - if step_tag_id is not None: - token_ids = torch.tensor(seq_data_i.prompt_token_ids) - pooled_data_i = pooled_data_i[token_ids == step_tag_id] - - offset += prompt_len - pooled_data.append(pooled_data_i) - else: - raise ValueError(f"Invalid pooling type: {self.pooling_type}") - - if self.normalize: - if isinstance(pooled_data, list): - pooled_data = [ - nn.functional.normalize(data, p=2, dim=1) - for data in pooled_data - ] - else: - pooled_data = nn.functional.normalize(pooled_data, p=2, dim=1) - - if self.softmax: - if isinstance(pooled_data, list): - pooled_data = [ - nn.functional.softmax(data, dim=-1) for data in pooled_data - ] - else: - pooled_data = nn.functional.softmax(pooled_data, dim=-1) - - pooled_outputs = [ - EmbeddingSequenceGroupOutput(data.tolist()) for data in pooled_data - ] - - return PoolerOutput(outputs=pooled_outputs) - class CrossEncodingPooler(nn.Module): """A layer that pools specific information from hidden states. @@ -210,9 +313,8 @@ def forward( if self.pooler is not None: # apply classifier once on the full batch if possible pooled_output = self.classifier(pooled_output) - logits = self.default_activation_function(pooled_output) - pooled_outputs = [ - EmbeddingSequenceGroupOutput(data.tolist()) for data in logits - ] + scores = self.default_activation_function(pooled_output).squeeze(-1) + + pooled_outputs = [PoolingSequenceGroupOutput(data) for data in scores] return PoolerOutput(outputs=pooled_outputs) diff --git a/vllm/model_executor/layers/quantization/bitsandbytes.py b/vllm/model_executor/layers/quantization/bitsandbytes.py index e01c713dd14db..5dc872933282c 100644 --- a/vllm/model_executor/layers/quantization/bitsandbytes.py +++ b/vllm/model_executor/layers/quantization/bitsandbytes.py @@ -145,12 +145,12 @@ class BitsAndBytesLinearMethod(LinearMethodBase): def __init__(self, quant_config: BitsAndBytesConfig): try: import bitsandbytes - if bitsandbytes.__version__ < "0.44.0": + if bitsandbytes.__version__ < "0.45.0": raise ImportError("bitsandbytes version is wrong. 
Please " - "install bitsandbytes>=0.44.0.") + "install bitsandbytes>=0.45.0.") except ImportError as err: - raise ImportError("Please install bitsandbytes>=0.44.0 via " - "`pip install bitsandbytes>=0.44.0` to use " + raise ImportError("Please install bitsandbytes>=0.45.0 via " + "`pip install bitsandbytes>=0.45.0` to use " "bitsandbytes quantizer.") from err self.quant_config = quant_config diff --git a/vllm/model_executor/layers/quantization/gguf.py b/vllm/model_executor/layers/quantization/gguf.py index 24138662eb25c..f0943efa0039d 100644 --- a/vllm/model_executor/layers/quantization/gguf.py +++ b/vllm/model_executor/layers/quantization/gguf.py @@ -2,6 +2,7 @@ import gguf import torch +from gguf import GGMLQuantizationType as WeightType from torch.nn.parameter import Parameter, UninitializedParameter from vllm import _custom_ops as ops @@ -49,19 +50,65 @@ def get_quant_method(self, layer: torch.nn.Module, return None +UNQUANTIZED_TYPES = {WeightType.F32, WeightType.F16, WeightType.BF16} +STANDARD_QUANT_TYPES = { + WeightType.Q4_0, + WeightType.Q4_1, + WeightType.Q5_0, + WeightType.Q5_1, + WeightType.Q8_0, + WeightType.Q8_1, +} +KQUANT_TYPES = { + WeightType.Q2_K, + WeightType.Q3_K, + WeightType.Q4_K, + WeightType.Q5_K, + WeightType.Q6_K, +} +IMATRIX_QUANT_TYPES = { + WeightType.IQ1_M, + WeightType.IQ1_S, + WeightType.IQ2_XXS, + WeightType.IQ2_XS, + WeightType.IQ2_S, + WeightType.IQ3_XXS, + WeightType.IQ3_S, + WeightType.IQ4_XS, + WeightType.IQ4_NL, +} +# TODO(Isotr0py): Currently, we don't have MMQ kernel for I-Matrix quantization. +# Consolidate DEQUANT_TYPES, MMVQ_QUANT_TYPES and MMQ_QUANT_TYPES after we add +# MMQ kernel for I-Matrix quantization. +DEQUANT_TYPES = STANDARD_QUANT_TYPES | KQUANT_TYPES | IMATRIX_QUANT_TYPES +MMVQ_QUANT_TYPES = STANDARD_QUANT_TYPES | KQUANT_TYPES | IMATRIX_QUANT_TYPES +MMQ_QUANT_TYPES = STANDARD_QUANT_TYPES | KQUANT_TYPES + + def _fuse_mul_mat(x: torch.Tensor, qweight: torch.Tensor, qweight_type: int) -> torch.Tensor: - # use dequantize mulmat for IQmatrix, mmq for k-quants - if x.shape[0] == 1: - # enable mmvq in contiguous batching + # there is no need to call any kernel for fp16/bf16 + if qweight_type in UNQUANTIZED_TYPES: + return x @ qweight.T + # enable MMVQ in contiguous batching with batch_size=1 + if x.shape[0] == 1 and qweight_type in MMVQ_QUANT_TYPES: y = ops.ggml_mul_mat_vec_a8(qweight, x, qweight_type, qweight.shape[0]) - elif qweight_type >= 16: + # Use MMQ Kernel if it's available (standard + k-quants) + elif qweight_type in MMQ_QUANT_TYPES: + y = ops.ggml_mul_mat_a8(qweight, x, qweight_type, qweight.shape[0]) + # If there is no available MMQ kernel, fallback to dequantize + elif qweight_type in DEQUANT_TYPES: block_size, type_size = gguf.GGML_QUANT_SIZES[qweight_type] shape = (qweight.shape[0], qweight.shape[1] // type_size * block_size) weight = ops.ggml_dequantize(qweight, qweight_type, *shape) y = x @ weight.T else: - y = ops.ggml_mul_mat_a8(qweight, x, qweight_type, qweight.shape[0]) + # Raise an error if the quantization type is not supported. + # Might be useful if llama.cpp adds a new quantization type. + # Wrap to GGMLQuantizationType IntEnum to make sure it's a valid type. 
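+        # (Constructing the IntEnum raises ValueError for unknown type ids,
+        # so corrupt GGUF metadata fails fast here.)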
+        qweight_type = WeightType(qweight_type)
+        raise NotImplementedError(
+            f"Unsupported GGUF quantization type: {qweight_type}")
     return y
 
 
@@ -121,9 +168,9 @@ def apply(self,
             shard_id = ["q", "k", "v"] if "q" in shard_id else shard_id
             qweight = layer.qweight.unbind(0)
             result = []
-            for id in shard_id:
-                q_idx = layer.qweight.shard_id_map[id]
-                qweight_type = layer.qweight_type.shard_weight_type[id]
+            for idx in shard_id:
+                q_idx = layer.qweight.shard_id_map[idx]
+                qweight_type = layer.qweight_type.shard_weight_type[idx]
                 result.append(_fuse_mul_mat(x, qweight[q_idx], qweight_type))
             out = torch.cat(result, axis=1)
         else:
@@ -163,9 +210,13 @@ class GGUFUninitializedParameter(UninitializedParameter):
     data_container: List[torch.Tensor]
 
     def materialize_nested(self) -> Parameter:
+        dtype = {data.dtype for data in self.data_container}
+        assert len(dtype) == 1, \
+            f"Data container has mixed dtypes: {dtype}"
+        dtype = next(iter(dtype))
         nested_data = torch.nested.nested_tensor(self.data_container,
                                                  device=self.device,
-                                                 dtype=torch.uint8)
+                                                 dtype=dtype)
         self.data_container.clear()
         param = torch.Tensor._make_subclass(self.cls_to_become,
                                             nested_data,
diff --git a/vllm/model_executor/model_loader/loader.py b/vllm/model_executor/model_loader/loader.py
index 37c2d789030b6..fdc4c6305bd5e 100644
--- a/vllm/model_executor/model_loader/loader.py
+++ b/vllm/model_executor/model_loader/loader.py
@@ -6,9 +6,9 @@
 import glob
 import inspect
 import itertools
-import json
 import math
 import os
+import warnings
 from abc import ABC, abstractmethod
 from contextlib import contextmanager
 from typing import Any, Dict, Generator, Iterable, List, Optional, Tuple, cast
@@ -17,7 +17,7 @@
 import huggingface_hub
 import numpy as np
 import torch
-from huggingface_hub import HfApi, hf_hub_download
+from huggingface_hub import HfApi
 from torch import nn
 from transformers import AutoModelForCausalLM
 from transformers.utils import SAFE_WEIGHTS_INDEX_NAME
@@ -97,22 +97,29 @@ def device_loading_context(module: torch.nn.Module,
 logger = init_logger(__name__)
 
 
-def _initialize_model(vllm_config: VllmConfig, prefix: str = "") -> nn.Module:
+def _initialize_model(
+    vllm_config: VllmConfig,
+    *,
+    prefix: str = "",
+) -> nn.Module:
     """Initialize a model with the given configurations."""
     model_config = vllm_config.model_config
     model_class, _ = get_model_architecture(model_config)
+
     signatures = inspect.signature(model_class.__init__)
     all_params = [param.name for param in signatures.parameters.values()]
     if "vllm_config" in all_params and "prefix" in all_params:
         # new-style model class
         with set_current_vllm_config(vllm_config):
             return model_class(vllm_config=vllm_config, prefix=prefix)
+
     msg = ("vLLM model class should accept `vllm_config` and `prefix` as "
           "input arguments. Possibly you have an old-style model class"
          " registered out of tree that is being used with a newer vLLM version. 
" "Check https://docs.vllm.ai/en/latest/design/arch_overview.html " "for the design and update the model class accordingly.") - logger.warning(msg) + warnings.warn(msg, DeprecationWarning, stacklevel=2) + logger.warning( "Trying to guess the arguments for old-style model class %s", model_class, @@ -356,7 +363,7 @@ def load_model(self, vllm_config: VllmConfig) -> nn.Module: weights_to_load = {name for name, _ in model.named_parameters()} loaded_weights = model.load_weights( self._get_all_weights(model_config, model)) - # We only enable strict check for non-quantiized models + # We only enable strict check for non-quantized models # that have loaded weights tracking currently. if model_config.quantization is None and loaded_weights is not None: weights_not_loaded = weights_to_load - loaded_weights @@ -694,51 +701,9 @@ def __init__(self, load_config: LoadConfig): self.unsharded_weights_modules: List[str] = [] # Save the module names that are sharded by column. self.column_sharded_weights_modules: List[str] = [] - # we don't need to quantize the whole model, only the target modules - # that are specified in the adapter config file. If the adapter config - # file is not provided, we will quantize the default modules. - if (not load_config.model_loader_extra_config - or "qlora_adapter_name_or_path" - not in load_config.model_loader_extra_config): - self.target_modules = [] - return - - qlora_adapter = load_config.model_loader_extra_config[ - "qlora_adapter_name_or_path"] - - config_file_path = self._get_config_file(qlora_adapter) - - with open(config_file_path) as f: - config = json.load(f) - self.target_modules = config["target_modules"] - # TODO: target_modules could be either a list or a regex string. - # We need to handle both cases. - assert isinstance(self.target_modules, - list), "Unsupported target_modules: " - f"{self.target_modules}" - - def _get_config_file(self, qlora_adapter: str) -> str: - is_local = os.path.isdir(qlora_adapter) - config_file_path = None - if is_local: - for file in self.possible_config_file_names: - config_file_path = os.path.join(qlora_adapter, file) - if os.path.exists(config_file_path): - break - else: - hf_api = HfApi() - repo_files = hf_api.list_repo_files(repo_id=qlora_adapter) - for file in self.possible_config_file_names: - if file in repo_files: - config_file_path = hf_hub_download(repo_id=qlora_adapter, - filename=file) - break - - if not config_file_path: - raise ValueError( - f"Cannot find adapter config file in {qlora_adapter}") - - return config_file_path + # Store all module names (from transformers) that support + # BNB quantization. + self.target_modules: List[str] = [] def _get_weight_files( self, @@ -1020,25 +985,16 @@ def _get_bnb_target_modules(self, model: nn.Module) -> None: inverse_stacked_mapping[packed] = [] inverse_stacked_mapping[packed].insert(idx, orig) - linear_module_lst = [] for name, module in model.named_modules(): if isinstance(module, (LinearBase, )): last_name = name.split(".")[-1] if sub_modules := inverse_stacked_mapping.get(last_name, []): # Map vllm's names to transformers' names. 
                     for sub_name in sub_modules:
-                        linear_module_lst.append(
+                        self.target_modules.append(
                             name.replace(last_name, sub_name))
                 else:
-                    linear_module_lst.append(name)
-        if self.target_modules:
-            # Update self.target_modules
-            self.target_modules = [
-                qual_name for qual_name in linear_module_lst
-                if any(t in qual_name for t in self.target_modules)
-            ]
-        else:
-            self.target_modules = linear_module_lst
+                    self.target_modules.append(name)
         assert (self.target_modules
                 ), "vllm currently does not support BNB quantization for"
         f" {type(model).__name__}"
@@ -1110,7 +1066,14 @@ def _load_weights(self, model_config: ModelConfig,
                                                   model_config.revision,
                                                   pre_quant, load_8bit))
 
-        model.load_weights(qweight_iterator)
+        weights_to_load = {name for name, _ in model.named_parameters()}
+        loaded_weights = model.load_weights(qweight_iterator)
+        # Some models may not implement weight loading tracking.
+        if loaded_weights is not None:
+            weights_not_loaded = weights_to_load - loaded_weights
+            if weights_not_loaded:
+                raise ValueError("The following weights were not initialized "
+                                 f"from the checkpoint: {weights_not_loaded}")
 
         torch.cuda.empty_cache()
 
@@ -1142,9 +1105,10 @@ def _load_weights(self, model_config: ModelConfig,
                                                    shard_name, weight_name)
                     break
 
+            # Models like CLIP/SigLIP may skip some layers during init,
+            # leaving unused quant_param_name entries in the state_dict.
             if quant_param_name not in param_dict:
-                raise ValueError(
-                    f"Parameter {quant_param_name} not found in the model.")
+                continue
 
             if quant_param_name not in stacked_quant_state_dict:
                 stacked_quant_state_dict[quant_param_name] = {}
 
diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py
index b95c0b7cd0612..f0316f9cde522 100644
--- a/vllm/model_executor/model_loader/utils.py
+++ b/vllm/model_executor/model_loader/utils.py
@@ -7,6 +7,7 @@
 from vllm.config import ModelConfig
 from vllm.model_executor.models import ModelRegistry
+from vllm.model_executor.models.adapters import as_embedding_model
 
 
 @contextlib.contextmanager
@@ -21,6 +22,7 @@ def set_default_torch_dtype(dtype: torch.dtype):
 
 def get_model_architecture(
         model_config: ModelConfig) -> Tuple[Type[nn.Module], str]:
     architectures = getattr(model_config.hf_config, "architectures", [])
+
     # Special handling for quantized Mixtral.
     # FIXME(woosuk): This is a temporary hack.
     mixtral_supported = [
@@ -31,8 +33,15 @@ def get_model_architecture(
             and model_config.quantization not in mixtral_supported
             and "MixtralForCausalLM" in architectures):
         architectures = ["QuantMixtralForCausalLM"]
+
+    # FIXME(Isotr0py): This is a temporary hack to enable transformers fallback. 
+ architectures = ["TransformersModel"] + + model_cls, arch = ModelRegistry.resolve_model_cls(architectures) + if model_config.runner_type == "pooling": + model_cls = as_embedding_model(model_cls) - return ModelRegistry.resolve_model_cls(architectures) + return model_cls, arch def get_architecture_class_name(model_config: ModelConfig) -> str: diff --git a/vllm/model_executor/models/__init__.py b/vllm/model_executor/models/__init__.py index d66373512b95e..a3ef9adad16d9 100644 --- a/vllm/model_executor/models/__init__.py +++ b/vllm/model_executor/models/__init__.py @@ -1,15 +1,14 @@ from .interfaces import (HasInnerState, SupportsLoRA, SupportsMultiModal, SupportsPP, has_inner_state, supports_lora, supports_multimodal, supports_pp) -from .interfaces_base import (VllmModelForEmbedding, - VllmModelForTextGeneration, is_embedding_model, - is_text_generation_model) +from .interfaces_base import (VllmModelForPooling, VllmModelForTextGeneration, + is_pooling_model, is_text_generation_model) from .registry import ModelRegistry __all__ = [ "ModelRegistry", - "VllmModelForEmbedding", - "is_embedding_model", + "VllmModelForPooling", + "is_pooling_model", "VllmModelForTextGeneration", "is_text_generation_model", "HasInnerState", @@ -20,4 +19,4 @@ "supports_multimodal", "SupportsPP", "supports_pp", -] \ No newline at end of file +] diff --git a/vllm/model_executor/models/adapters.py b/vllm/model_executor/models/adapters.py new file mode 100644 index 0000000000000..9cc43ae9181b9 --- /dev/null +++ b/vllm/model_executor/models/adapters.py @@ -0,0 +1,98 @@ +from collections.abc import Iterable +from typing import Any, TypeVar + +import torch +import torch.nn as nn + +from .interfaces_base import VllmModelForPooling, is_pooling_model + +_T = TypeVar("_T", bound=type[nn.Module]) + + +def as_embedding_model(cls: _T) -> _T: + """Subclass an existing vLLM model to support embeddings.""" + # Avoid modifying existing embedding models + if is_pooling_model(cls): + return cls + + # Lazy import + from vllm.config import VllmConfig + from vllm.model_executor.layers.pooler import (Pooler, PoolerOutput, + PoolingType) + from vllm.model_executor.pooling_metadata import PoolingMetadata + + from .utils import AutoWeightsLoader, WeightsMapper + + class ModelForEmbedding(cls, VllmModelForPooling): + + def __init__( + self, + *, + vllm_config: "VllmConfig", + prefix: str = "", + **kwargs: Any, + ) -> None: + super().__init__(vllm_config=vllm_config, prefix=prefix, **kwargs) + + # These are not used in embedding models + for attr in ("lm_head", "logits_processor"): + if hasattr(self, attr): + delattr(self, attr) + + pooler_config = vllm_config.model_config.pooler_config + assert pooler_config is not None + + # If the model already defines a pooler instance, don't overwrite it + if not getattr(self, "_pooler", None): + self._pooler = Pooler.from_config_with_defaults( + pooler_config, + pooling_type=PoolingType.LAST, + normalize=True, + softmax=False, + ) + + def pooler( + self, + hidden_states: torch.Tensor, + pooling_metadata: PoolingMetadata, + ) -> PoolerOutput: + return self._pooler(hidden_states, pooling_metadata) + + def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]): + # TODO: Support uninitialized params tracking + + # We have deleted this attribute, so don't load it + weights = ((name, data) for name, data in weights + if not name.startswith("lm_head.")) + + # If `*ForCausalLM` defines `load_weights` on the inner model + # and there are no other inner modules with parameters, + # we support loading 
from both `*Model` and `*ForCausalLM` + if hasattr(self, "model") and hasattr(self.model, "load_weights"): + # Whether only `self.model` contains parameters + model_is_only_param = all( + name == "model" or next(child.parameters(), None) is None + for name, child in self.named_children()) + + if model_is_only_param: + mapper = WeightsMapper(orig_to_new_prefix={"model.": ""}) + weights = mapper.apply(weights) + + self.model.load_weights(weights) + return + + # For most other models + if hasattr(cls, "load_weights"): + cls.load_weights(self, weights) # type: ignore + # Fallback + else: + loader = AutoWeightsLoader(self) + loader.load_weights(weights) + + ModelForEmbedding.__name__ = cls.__name__ \ + .removesuffix("ForCausalLM") \ + .removesuffix("ForConditionalGeneration") \ + .removesuffix("ChatModel") \ + .removesuffix("LMHeadModel") + "ForEmbedding" + + return ModelForEmbedding # type: ignore diff --git a/vllm/model_executor/models/aria.py b/vllm/model_executor/models/aria.py index fa6b95f5481ad..dd4b0c75cb84d 100644 --- a/vllm/model_executor/models/aria.py +++ b/vllm/model_executor/models/aria.py @@ -32,9 +32,8 @@ maybe_prefix, merge_multimodal_embeddings) from vllm.multimodal import MULTIMODAL_REGISTRY -from vllm.multimodal.base import MultiModalInputs from vllm.multimodal.image import cached_get_image_processor -from vllm.multimodal.inputs import NestedTensors +from vllm.multimodal.inputs import MultiModalKwargs, NestedTensors from vllm.multimodal.utils import (cached_get_tokenizer, repeat_and_pad_placeholder_tokens) from vllm.sequence import IntermediateTensors @@ -451,7 +450,7 @@ def get_max_multimodal_tokens(ctx): def input_mapper_for_aria(ctx, data): - return MultiModalInputs(data) + return MultiModalKwargs(data) def input_processor(ctx, llm_inputs): diff --git a/vllm/model_executor/models/bert.py b/vllm/model_executor/models/bert.py index 1fff72b3490e9..053d838432885 100644 --- a/vllm/model_executor/models/bert.py +++ b/vllm/model_executor/models/bert.py @@ -443,6 +443,8 @@ def pooler( def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={"model.": ""}) weights = hf_to_vllm_mapper.apply(weights) + weights = ((name, data) for name, data in weights + if not name.startswith("lm_head.")) self.model.load_weights(weights) def _build_model(self, diff --git a/vllm/model_executor/models/blip.py b/vllm/model_executor/models/blip.py index 6af59697160a0..42a239cadac46 100644 --- a/vllm/model_executor/models/blip.py +++ b/vllm/model_executor/models/blip.py @@ -4,11 +4,10 @@ import torch import torch.nn as nn -import torch.nn.functional as F from PIL import Image from transformers import Blip2VisionConfig, BlipVisionConfig -from vllm.attention.selector import _Backend +from vllm.attention.layer import MultiHeadAttention from vllm.config import ModelConfig from vllm.distributed import divide, get_tensor_model_parallel_world_size from vllm.inputs import DecoderOnlyInputs, token_inputs @@ -22,8 +21,6 @@ repeat_and_pad_placeholder_tokens) from vllm.sequence import SequenceData -from .utils import get_vit_attn_backend - def get_blip_patch_grid_length(*, image_size: int, patch_size: int) -> int: assert image_size % patch_size == 0 @@ -205,11 +202,8 @@ def __init__( self.tp_size = get_tensor_model_parallel_world_size() self.num_heads_per_partition = divide(self.num_heads, self.tp_size) - # Detect attention implementation. 
- self.attn_backend = get_vit_attn_backend(support_fa=False) - if self.attn_backend not in {_Backend.TORCH_SDPA, _Backend.XFORMERS}: - raise RuntimeError( - f"BLIP does not support {self.attn_backend} backend now.") + self.attn = MultiHeadAttention(self.num_heads_per_partition, + self.head_dim, self.scale) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, @@ -220,41 +214,10 @@ def forward( hidden_states: torch.Tensor, ): """Input shape: Batch x Time x Channel""" - bsz, tgt_len, _ = hidden_states.size() qkv_states, _ = self.qkv(hidden_states) query_states, key_states, value_states = qkv_states.chunk(3, dim=-1) - query_states = query_states.view(bsz, tgt_len, - self.num_heads_per_partition, - self.head_dim) - key_states = key_states.view(bsz, tgt_len, - self.num_heads_per_partition, - self.head_dim) - value_states = value_states.view(bsz, tgt_len, - self.num_heads_per_partition, - self.head_dim) - - if self.attn_backend == _Backend.XFORMERS: - from xformers import ops as xops - - out = xops.memory_efficient_attention_forward(query_states, - key_states, - value_states, - p=self.dropout, - scale=self.scale) - elif self.attn_backend == _Backend.TORCH_SDPA: - query_states, key_states, value_states = (x.transpose(1, 2) - for x in (query_states, - key_states, - value_states)) - out = F.scaled_dot_product_attention(query_states, - key_states, - value_states, - dropout_p=self.dropout, - scale=self.scale) - out = out.transpose(1, 2) - - out = out.view(bsz, tgt_len, -1) + out = self.attn(query_states, key_states, value_states) attn_output, _ = self.projection(out) return attn_output, None diff --git a/vllm/model_executor/models/blip2.py b/vllm/model_executor/models/blip2.py index d2592016aff34..76b8505ee1c2a 100644 --- a/vllm/model_executor/models/blip2.py +++ b/vllm/model_executor/models/blip2.py @@ -512,9 +512,10 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): ) self.language_model = init_vllm_registered_model( - config.text_config, vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "language_model")) + hf_config=config.text_config, + prefix=maybe_prefix(prefix, "language_model"), + ) self.make_empty_intermediate_tensors = ( self.language_model.make_empty_intermediate_tensors) diff --git a/vllm/model_executor/models/clip.py b/vllm/model_executor/models/clip.py index cd89519e95986..a5300dfd986f3 100644 --- a/vllm/model_executor/models/clip.py +++ b/vllm/model_executor/models/clip.py @@ -5,11 +5,10 @@ import numpy as np import torch import torch.nn as nn -import torch.nn.functional as F from PIL import Image from transformers import CLIPVisionConfig -from vllm.attention.selector import _Backend +from vllm.attention.layer import MultiHeadAttention from vllm.config import ModelConfig from vllm.distributed import divide, get_tensor_model_parallel_world_size from vllm.inputs import DecoderOnlyInputs, token_inputs @@ -25,8 +24,6 @@ resolve_visual_encoder_outputs) from vllm.sequence import SequenceData -from .utils import get_vit_attn_backend - def get_clip_patch_grid_length(*, image_size: int, patch_size: int) -> int: assert image_size % patch_size == 0 @@ -235,11 +232,8 @@ def __init__( self.tp_size = get_tensor_model_parallel_world_size() self.num_heads_per_partition = divide(self.num_heads, self.tp_size) - # Detect attention implementation. 
- self.attn_backend = get_vit_attn_backend(support_fa=False) - if self.attn_backend not in {_Backend.TORCH_SDPA, _Backend.XFORMERS}: - raise RuntimeError( - f"CLIP does not support {self.attn_backend} backend now.") + self.attn = MultiHeadAttention(self.num_heads_per_partition, + self.head_dim, self.scale) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, @@ -250,42 +244,10 @@ def forward( hidden_states: torch.Tensor, ): """Input shape: Batch x Time x Channel""" - bsz, tgt_len, _ = hidden_states.size() qkv_states, _ = self.qkv_proj(hidden_states) query_states, key_states, value_states = qkv_states.chunk(3, dim=-1) - - query_states = query_states.view(bsz, tgt_len, - self.num_heads_per_partition, - self.head_dim) - key_states = key_states.view(bsz, tgt_len, - self.num_heads_per_partition, - self.head_dim) - value_states = value_states.view(bsz, tgt_len, - self.num_heads_per_partition, - self.head_dim) - - if self.attn_backend == _Backend.XFORMERS: - from xformers import ops as xops - - out = xops.memory_efficient_attention_forward(query_states, - key_states, - value_states, - p=self.dropout, - scale=self.scale) - elif self.attn_backend == _Backend.TORCH_SDPA: - query_states, key_states, value_states = (x.transpose(1, 2) - for x in (query_states, - key_states, - value_states)) - out = F.scaled_dot_product_attention(query_states, - key_states, - value_states, - dropout_p=self.dropout, - scale=self.scale) - out = out.transpose(1, 2) - - out = out.view(bsz, tgt_len, -1) + out = self.attn(query_states, key_states, value_states) attn_output, _ = self.out_proj(out) return attn_output, None diff --git a/vllm/model_executor/models/commandr.py b/vllm/model_executor/models/commandr.py index 85e24ca660686..c846e42f1b0c3 100644 --- a/vllm/model_executor/models/commandr.py +++ b/vllm/model_executor/models/commandr.py @@ -48,7 +48,7 @@ from vllm.sequence import IntermediateTensors from .interfaces import SupportsLoRA, SupportsPP -from .utils import (is_pp_missing_parameter, +from .utils import (extract_layer_index, is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) @@ -171,12 +171,26 @@ def __init__( rope_scaling=self.rope_scaling, is_neox_style=False, ) + + sliding_window = getattr(config, "sliding_window", None) + # Model v2 has sliding windows, v1 does not + self.v1 = sliding_window is None + + layer_idx = extract_layer_index(prefix) + layer_has_sliding_window = ( + getattr(config, "sliding_window_pattern", False) + and (layer_idx + 1) % self.config.sliding_window_pattern != 0) + + self.sliding_window = (sliding_window + if layer_has_sliding_window else None) + self.attn = Attention(self.num_heads, self.head_dim, self.scaling, num_kv_heads=self.num_kv_heads, cache_config=cache_config, quant_config=quant_config, + per_layer_sliding_window=self.sliding_window, prefix=f"{prefix}.attn") if self.use_qk_norm: self.q_norm = LayerNorm(param_shape=(self.num_heads, @@ -206,7 +220,8 @@ def forward( q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) if self.use_qk_norm: q, k = self._apply_qk_norm(q, k) - q, k = self.rotary_emb(positions, q, k) + if self.v1 or self.sliding_window: + q, k = self.rotary_emb(positions, q, k) attn_output = self.attn(q, k, v, kv_cache, attn_metadata) output, _ = self.o_proj(attn_output) return output diff --git a/vllm/model_executor/models/exaone.py b/vllm/model_executor/models/exaone.py index 5ca26d53a17e7..0398f0943a70a 100644 --- 
a/vllm/model_executor/models/exaone.py +++ b/vllm/model_executor/models/exaone.py @@ -473,10 +473,11 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, config.vocab_size, logit_scale) - self.sampler = get_sampler() else: self.lm_head = PPMissingLayer() + self.sampler = get_sampler() + self.make_empty_intermediate_tensors = ( self.transformer.make_empty_intermediate_tensors) diff --git a/vllm/model_executor/models/gemma2.py b/vllm/model_executor/models/gemma2.py index c93223c740272..4664aa53ea092 100644 --- a/vllm/model_executor/models/gemma2.py +++ b/vllm/model_executor/models/gemma2.py @@ -30,19 +30,17 @@ QKVParallelLinear, RowParallelLinear) from vllm.model_executor.layers.logits_processor import LogitsProcessor -from vllm.model_executor.layers.pooler import Pooler, PoolingType from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.rotary_embedding import get_rope from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler from vllm.model_executor.layers.vocab_parallel_embedding import ( VocabParallelEmbedding) from vllm.model_executor.model_loader.weight_utils import default_weight_loader -from vllm.model_executor.pooling_metadata import PoolingMetadata from vllm.model_executor.sampling_metadata import SamplingMetadata -from vllm.sequence import IntermediateTensors, PoolerOutput +from vllm.sequence import IntermediateTensors from .interfaces import SupportsLoRA, SupportsPP -from .utils import (AutoWeightsLoader, WeightsMapper, extract_layer_index, +from .utils import (AutoWeightsLoader, extract_layer_index, is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) @@ -455,53 +453,3 @@ def load_weights(self, weights: Iterable[Tuple[str, if self.config.tie_word_embeddings else None), ) return loader.load_weights(weights) - - -class Gemma2EmbeddingModel(nn.Module, SupportsPP): - """ - A model that uses Gemma2 with additional embedding functionalities. - - This class encapsulates the Gemma2Model and provides an interface for - embedding operations and customized pooling functions. - - Attributes: - model: An instance of Gemma2Model used for forward operations. - _pooler: An instance of Pooler used for pooling operations. 
- """ - - def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): - super().__init__() - - self.model = Gemma2Model(vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "model")) - self._pooler = Pooler.from_config_with_defaults( - vllm_config.model_config.pooler_config, - pooling_type=PoolingType.LAST, - normalize=True, - softmax=False) - self.make_empty_intermediate_tensors = ( - self.model.make_empty_intermediate_tensors) - - def forward( - self, - input_ids: Optional[torch.Tensor], - positions: torch.Tensor, - kv_caches: List[torch.Tensor], - attn_metadata: AttentionMetadata, - intermediate_tensors: Optional[IntermediateTensors] = None, - inputs_embeds: Optional[torch.Tensor] = None, - ) -> Union[torch.Tensor, IntermediateTensors]: - return self.model(input_ids, positions, kv_caches, attn_metadata, - intermediate_tensors, inputs_embeds) - - def pooler( - self, - hidden_states: torch.Tensor, - pooling_metadata: PoolingMetadata, - ) -> Optional[PoolerOutput]: - return self._pooler(hidden_states, pooling_metadata) - - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): - hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={"model.": ""}) - weights = hf_to_vllm_mapper.apply(weights) - self.model.load_weights(weights) diff --git a/vllm/model_executor/models/glm.py b/vllm/model_executor/models/glm.py new file mode 100644 index 0000000000000..942d1e14baed1 --- /dev/null +++ b/vllm/model_executor/models/glm.py @@ -0,0 +1,21 @@ +"""Inference-only HF format GLM-4 model compatible with THUDM weights.""" +from vllm.config import VllmConfig +from vllm.model_executor.models.llama import LlamaForCausalLM + +from .utils import PPMissingLayer + + +class GlmForCausalLM(LlamaForCausalLM): + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__(vllm_config=vllm_config, prefix=prefix) + # Hack Llama model to fit HF format GLM implementation + # Attention difference between GLM and Llama: + # 1. Half partial rotary_dim and no Neox style. + # 2. 
There is no bias for o_proj in attention + for layer in self.model.layers: + if not isinstance(layer, PPMissingLayer): + layer.self_attn.rotary_emb.rotary_dim //= 2 + layer.self_attn.rotary_emb.is_neox_style = False + layer.self_attn.o_proj.bias = None + layer.self_attn.o_proj.skip_bias_add = True diff --git a/vllm/model_executor/models/glm4_vision_encoder.py b/vllm/model_executor/models/glm4_vision_encoder.py index f37ab0f82d52a..39a5736eb199b 100644 --- a/vllm/model_executor/models/glm4_vision_encoder.py +++ b/vllm/model_executor/models/glm4_vision_encoder.py @@ -8,6 +8,7 @@ from torch import nn from torch.nn import LayerNorm +from vllm.attention.layer import MultiHeadAttention from vllm.distributed import get_tensor_model_parallel_world_size from vllm.model_executor.layers.activation import SiluAndMul, get_act_fn from vllm.model_executor.layers.linear import (ColumnParallelLinear, @@ -77,27 +78,16 @@ def __init__( quant_config=quant_config, ) + self.attn = MultiHeadAttention(self.num_heads_per_rank, self.head_dim, + self.scale) self.output_dropout = torch.nn.Dropout(config.dropout_prob) def forward(self, x: torch.Tensor) -> torch.Tensor: - B, L, _ = x.shape qkv, _ = self.query_key_value(x) # B, L, 3 * H * D q, k, v = qkv.chunk(3, dim=-1) - q = q.reshape(B, L, self.num_heads_per_rank, - self.head_dim).permute(0, 2, 1, 3) # B, H, L, D - k = k.reshape(B, L, self.num_heads_per_rank, - self.head_dim).permute(0, 2, 1, 3) # B, H, L, D - v = v.reshape(B, L, self.num_heads_per_rank, - self.head_dim).permute(0, 2, 1, 3) # B, H, L, D - - out = torch.nn.functional.scaled_dot_product_attention(q, - k, - v, - attn_mask=None, - dropout_p=0., - is_causal=False) - - output, _ = self.dense(out.transpose(1, 2).view(B, L, -1)) + + out = self.attn(q, k, v) + output, _ = self.dense(out) output = self.output_dropout(output) return output diff --git a/vllm/model_executor/models/granite.py b/vllm/model_executor/models/granite.py index bd2394e71c973..f9e0443b9a508 100644 --- a/vllm/model_executor/models/granite.py +++ b/vllm/model_executor/models/granite.py @@ -400,16 +400,17 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.lm_head.weight = self.model.embed_tokens.weight logit_scale = getattr(config, "logit_scale", 1.0) - if hasattr(config, "logits_scaling"): logit_scale /= config.logits_scaling + self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, config.vocab_size, scale=logit_scale) - self.sampler = get_sampler() else: self.lm_head = PPMissingLayer() + self.sampler = get_sampler() + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: return self.model.get_input_embeddings(input_ids) diff --git a/vllm/model_executor/models/gritlm.py b/vllm/model_executor/models/gritlm.py new file mode 100644 index 0000000000000..d179d6235424a --- /dev/null +++ b/vllm/model_executor/models/gritlm.py @@ -0,0 +1,248 @@ +from array import array +from typing import List, Optional, Union + +import torch +import torch.nn as nn +from xformers.ops.fmha.attn_bias import BlockDiagonalMask + +from vllm.attention import AttentionMetadata +from vllm.attention.backends.xformers import XFormersImpl +from vllm.config import ModelConfig, VllmConfig +from vllm.logger import init_logger +from vllm.model_executor.layers.pooler import PoolerHead +from vllm.model_executor.models.llama import LlamaForCausalLM +from vllm.model_executor.pooling_metadata import (PoolingMetadata, + PoolingTensors) +from vllm.multimodal.utils import cached_get_tokenizer +from vllm.sequence import 
(IntermediateTensors, PoolerOutput,
+                           PoolingSequenceGroupOutput)
+
+logger = init_logger(__name__)
+
+
+class GritLMPooler(nn.Module):
+
+    def __init__(self, model_config: ModelConfig):
+        super().__init__()
+
+        self.model_config = model_config
+
+        tokenizer = cached_get_tokenizer(
+            self.model_config.tokenizer,
+            tokenizer_mode=self.model_config.tokenizer_mode,
+            tokenizer_revision=self.model_config.tokenizer_revision,
+            trust_remote_code=self.model_config.trust_remote_code,
+        )
+
+        # Collect the tokens needed for pattern matching.
+        # "▁<" is different from "_<". The former uses "▁" to indicate that
+        # the next token is the start of a word.
+        # "<0x0A>" is the newline token (i.e. "\n").
+        self.token_ids = {
+            tok: tokenizer.convert_tokens_to_ids([tok])[0]
+            for tok in ["<s>", "▁<", "<", "|", "embed", ">", "<0x0A>", "user"]
+        }
+
+        def tokens_to_ids(tokens: list[str]) -> array:
+            return array("i", [self.token_ids[token] for token in tokens])
+
+        self.user_pattern_ids = tokens_to_ids(
+            ["▁<", "|", "user", "|", ">", "<0x0A>"])
+        self.embed_newline_pattern_ids = tokens_to_ids(
+            ["<0x0A>", "<", "|", "embed", "|", ">", "<0x0A>"])
+        self.embed_pattern_ids = tokens_to_ids(
+            ["▁<", "|", "embed", "|", ">", "<0x0A>"])
+
+        self.head = PoolerHead(normalize=True, softmax=False)
+
+    def _find_array(self, arr: array, target: array, start_idx: int) -> int:
+        """
+        Find the first occurrence of target in arr starting from start_idx.
+
+        Args:
+            arr: The array to search within
+            target: The consecutive subsequence to find
+            start_idx: The starting index to search from
+
+        Returns:
+            int: The index of the first occurrence of target in arr,
+                or -1 if it is not found.
+        """
+        if start_idx < 0:
+            raise ValueError("start_idx must be non-negative")
+        if not target or not arr:
+            raise ValueError("Empty arr or target not allowed")
+
+        target_len = len(target)
+        for i in range(start_idx, len(arr) - target_len + 1):
+            if arr[i:i + target_len] == target:
+                return i
+        return -1
+
+    def _get_instruction_len(self, prompt_token_ids: array) -> int:
+        """
+        Get the length of the instruction in the prompt.
+
+        We do pattern matching to find the instruction in the prompt,
+        and then return the length of the instruction.
+
+        The pattern matching is done using integers instead of strings
+        because the prompt is given as a list of token IDs.
+        """
+
+        instruction_len = 0
+
+        # Return no instruction in case of missing BOS token.
+        if prompt_token_ids[0] != self.token_ids["<s>"]:
+            logger.warning("BOS token not found in prompt, "
+                           "thus using empty string for instruction. "
+                           "GritLM requires BOS token in prompt.")
+            return instruction_len
+
+        # If the user pattern is found in the prompt, that means there should
+        # be a newline token before the embed pattern.
+        embed_pattern_ids = self.embed_pattern_ids
+        if self._find_array(prompt_token_ids,
+                            self.user_pattern_ids,
+                            start_idx=1) == 1:
+            embed_pattern_ids = self.embed_newline_pattern_ids
+
+        # Find the embed pattern in the prompt.
+        found_embed_pattern_idx = self._find_array(prompt_token_ids,
+                                                   embed_pattern_ids,
+                                                   start_idx=1)
+
+        if found_embed_pattern_idx != -1:
+            instruction_len = found_embed_pattern_idx + len(embed_pattern_ids)
+        else:
+            logger.warning("Query instruction not found in prompt, "
+                           "thus using BOS token as instruction instead. 
+ "GritLM requires query instruction in prompt.") + instruction_len = 1 + + return instruction_len + + def forward( + self, + hidden_states: torch.Tensor, + pooling_metadata: PoolingMetadata, + ) -> PoolerOutput: + """ + Pool the hidden states by summing the embeddings of + non-instruction tokens. + """ + prompts_token_ids = [ + token_ids.prompt_token_ids_array + for _, token_ids in pooling_metadata.seq_data.items() + ] + + instruction_lens = torch.tensor( + [ + self._get_instruction_len(prompt_token_ids) + for prompt_token_ids in prompts_token_ids + ], + device=hidden_states.device, + ) + + prompt_lens = PoolingTensors.from_pooling_metadata( + pooling_metadata, hidden_states.device).prompt_lens + + mask = torch.zeros_like(hidden_states, dtype=torch.bool) + + start_idx = 0 + for prompt_len, instruction_len in zip(prompt_lens, instruction_lens): + end_idx = start_idx + prompt_len + mask[start_idx + instruction_len:end_idx] = True + start_idx = end_idx + + masked_hidden_states = hidden_states.masked_fill(~mask, 0.0) + + sum_embeddings = torch.zeros(len(prompt_lens), + hidden_states.size(1), + device=hidden_states.device) + + start_idx = 0 + for i, prompt_len in enumerate(prompt_lens): + end_idx = start_idx + prompt_len + sum_embeddings[i] = masked_hidden_states[start_idx:end_idx].sum( + dim=0) + start_idx = end_idx + + num_non_instruction_tokens = prompt_lens - instruction_lens + mean_embeddings = sum_embeddings / num_non_instruction_tokens.unsqueeze( + 1) + + pooled_data = self.head(mean_embeddings) + + pooled_outputs = [ + PoolingSequenceGroupOutput(data) for data in pooled_data + ] + + return PoolerOutput(outputs=pooled_outputs) + + +class GritLM(LlamaForCausalLM): + """This class implements the embedding model for parasail-ai/GritLM-7B-vllm. + + The class inherits from LlamaForCausalLM and provides a custom pooling + layer. + + The main difference between the pooling layer in GritLM and the one in + LlamaForCausalLM is that GritLM ignores the query instruction in the prompt + when pooling the hidden states. + + Embedding prompts should be in the following format: + - With instruction: "<|user|>\nINSTRUCTION\n<|embed|>\nPROMPT". + - Without instruction: "<|embed|>\nPROMPT". + + Generation prompts should be in the following format: + - "<|user|>\nPROMPT\n<|assistant|>\n" + """ + + def __init__( + self, + vllm_config: VllmConfig, + prefix: str = "", + **kwargs, + ) -> None: + super().__init__(vllm_config=vllm_config, prefix=prefix, **kwargs) + + self.runner_type = vllm_config.model_config.runner_type + + self._pooler = GritLMPooler(vllm_config.model_config) + + for layer in self.model.layers: + if self.runner_type == "pooling" and hasattr(layer, "self_attn"): + assert isinstance(layer.self_attn.attn.impl, XFormersImpl), ( + "GritLM embedding is only supported by XFormers backend, " + "which can be forced by VLLM_ATTENTION_BACKEND=XFORMERS") + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + **kwargs, + ) -> Union[torch.Tensor, IntermediateTensors]: + + # Change attention to non-causal for pooling tasks. 
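+        # (Noted for clarity: GritLM pools over bidirectionally-attended
+        # hidden states, and BlockDiagonalMask.from_seqlens gives every token
+        # full attention over its own prompt while still isolating the
+        # different sequences in the batch from each other.)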
+ if self.runner_type == "pooling": + assert attn_metadata.prefill_metadata.attn_bias is None + attn_metadata.prefill_metadata.attn_bias = [ + BlockDiagonalMask.from_seqlens(attn_metadata.seq_lens) + ] + + return super().forward( + input_ids=input_ids, + positions=positions, + kv_caches=kv_caches, + attn_metadata=attn_metadata, + **kwargs, + ) + + def pooler( + self, + hidden_states: torch.Tensor, + pooling_metadata: PoolingMetadata, + ) -> Optional[PoolerOutput]: + return self._pooler(hidden_states, pooling_metadata) diff --git a/vllm/model_executor/models/idefics2_vision_model.py b/vllm/model_executor/models/idefics2_vision_model.py index 16192928beb1f..e430a158d869a 100644 --- a/vllm/model_executor/models/idefics2_vision_model.py +++ b/vllm/model_executor/models/idefics2_vision_model.py @@ -21,8 +21,8 @@ from torch import nn from transformers.models.idefics2.configuration_idefics2 import ( Idefics2Config, Idefics2VisionConfig) -from xformers import ops as xops +from vllm.attention.layer import MultiHeadAttention from vllm.distributed import divide, get_tensor_model_parallel_world_size from vllm.model_executor.layers.activation import get_act_fn from vllm.model_executor.layers.linear import (ColumnParallelLinear, @@ -141,35 +141,18 @@ def __init__( ) self.tp_size = get_tensor_model_parallel_world_size() self.num_heads_per_partition = divide(self.num_heads, self.tp_size) - self.is_causal = False + self.attn = MultiHeadAttention(self.num_heads_per_partition, + self.head_dim, self.scale) def forward( self, hidden_states: torch.Tensor, ) -> torch.Tensor: - batch_size, q_len, _ = hidden_states.size() qkv, _ = self.qkv_proj( hidden_states ) # batch_size, q_len, 3 * num_heads_per_partition * head_dim query_states, key_states, value_states = qkv.chunk(3, dim=-1) - query_states = query_states.view(batch_size, q_len, - self.num_heads_per_partition, - self.head_dim) - key_states = key_states.view(batch_size, q_len, - self.num_heads_per_partition, - self.head_dim) - value_states = value_states.view(batch_size, q_len, - self.num_heads_per_partition, - self.head_dim) - # see: https://facebookresearch.github.io/xformers/components/ops.html - out = xops.memory_efficient_attention_forward( - query_states, - key_states, - value_states, - p=self.dropout, - scale=self.scale, - ) - out = out.view(batch_size, q_len, -1) + out = self.attn(query_states, key_states, value_states) attn_output, _ = self.out_proj(out) return attn_output diff --git a/vllm/model_executor/models/idefics3.py b/vllm/model_executor/models/idefics3.py index 014e27bc869d4..17e772e7faa32 100644 --- a/vllm/model_executor/models/idefics3.py +++ b/vllm/model_executor/models/idefics3.py @@ -60,7 +60,8 @@ class Idefics3ImagePixelInputs(TypedDict): type: Literal["pixel_values"] data: torch.Tensor """ - Shape: `(batch_size * num_images, num_channels, height, width)` + Shape: `(batch_size * num_images * num_patches, + num_channels, height, width)` """ pixel_attention_mask: Optional[torch.BoolTensor] @@ -267,54 +268,56 @@ def input_processor_for_idefics3(ctx: InputContext, n_images_in_text = [] text = inputs.get("prompt") - if text is not None: - if isinstance(text, str): - text = [text] - elif not isinstance(text, list) and not isinstance(text[0], str): - raise ValueError("Invalid input text. 
Please provide a string, " - "or a list of strings") - - fake_image_token = processor.fake_image_token.content - image_token = processor.image_token.content - global_img_token = processor.global_image_tag - - prompt_strings = [] - for sample, sample_rows, sample_cols in zip(text, image_rows, - image_cols): - n_images_in_text.append(sample.count(image_token)) - - # Replace the image token with fake tokens around the expanded - # image token sequence of length `image_seq_len` - image_prompt_strings = [] - for n_rows, n_cols in zip(sample_rows, sample_cols): - image_prompt_string = _get_image_prompt_string( - n_rows, - n_cols, - processor.image_seq_len, - image_token=image_token, - fake_token_around_image=fake_image_token, - global_img_token=global_img_token, - ) - image_prompt_strings.append(image_prompt_string) - - split_sample = sample.split(image_token) - if len(split_sample) == 0: - raise ValueError( - "The image token should be present in the text.") + if text is None: + prompt_token_ids = inputs.get("prompt_token_ids", []) + assert prompt_token_ids + text = tokenizer.decode(prompt_token_ids) + + if isinstance(text, str): + text = [text] + elif not isinstance(text, list) and not isinstance(text[0], str): + raise ValueError("Invalid input text. Please provide a string, " + "or a list of strings") + + fake_image_token = processor.fake_image_token.content + image_token = processor.image_token.content + global_img_token = processor.global_image_tag + + prompt_strings = [] + for sample, sample_rows, sample_cols in zip(text, image_rows, image_cols): + n_images_in_text.append(sample.count(image_token)) + + # Replace the image token with fake tokens around the expanded + # image token sequence of length `image_seq_len` + image_prompt_strings = [] + for n_rows, n_cols in zip(sample_rows, sample_cols): + image_prompt_string = _get_image_prompt_string( + n_rows, + n_cols, + processor.image_seq_len, + image_token=image_token, + fake_token_around_image=fake_image_token, + global_img_token=global_img_token, + ) + image_prompt_strings.append(image_prompt_string) - # Place in the image prompt strings where the image tokens are - sample = split_sample[0] - for i, image_prompt_string in enumerate(image_prompt_strings): - sample += image_prompt_string + split_sample[i + 1] - prompt_strings.append(sample) + split_sample = sample.split(image_token) + if len(split_sample) == 0: + raise ValueError("The image token should be present in the text.") - prompt_token_ids = tokenizer(text=prompt_strings[0]).input_ids + # Place in the image prompt strings where the image tokens are + sample = split_sample[0] + for i, image_prompt_string in enumerate(image_prompt_strings): + sample += image_prompt_string + split_sample[i + 1] + prompt_strings.append(sample) - return token_inputs( - prompt_token_ids=prompt_token_ids, - prompt=prompt_strings[0], - multi_modal_data=multi_modal_data, - ) + prompt_token_ids = tokenizer(text=prompt_strings[0]).input_ids + + return token_inputs( + prompt_token_ids=prompt_token_ids, + prompt=prompt_strings[0], + multi_modal_data=multi_modal_data, + ) def _get_max_num_image_patch(image_processor: Idefics3ImageProcessor) -> int: @@ -518,13 +521,17 @@ def _parse_and_validate_image_input( raise ValueError("Incorrect type of pixel values. 
" f"Got type: {type(pixel_values)}") - return Idefics3ImagePixelInputs(type="pixel_values", - data=self._validate_pixel_values( - flatten_bn(pixel_values, - concat=True)), - pixel_attention_mask=flatten_bn( - pixel_attention_mask, - concat=True)) + if isinstance(pixel_values, list): + pixel_values = torch.cat(pixel_values, dim=1) + pixel_attention_mask = torch.cat(pixel_attention_mask, dim=1) + else: + pixel_values = flatten_bn(pixel_values) + pixel_attention_mask = flatten_bn(pixel_attention_mask) + + return Idefics3ImagePixelInputs( + type="pixel_values", + data=self._validate_pixel_values(pixel_values), + pixel_attention_mask=pixel_attention_mask) raise AssertionError("This line should be unreachable.") diff --git a/vllm/model_executor/models/interfaces.py b/vllm/model_executor/models/interfaces.py index 1545ce332309f..70b78fe64f2d8 100644 --- a/vllm/model_executor/models/interfaces.py +++ b/vllm/model_executor/models/interfaces.py @@ -7,7 +7,7 @@ from vllm.logger import init_logger from vllm.utils import supports_kw -from .interfaces_base import is_embedding_model +from .interfaces_base import is_pooling_model if TYPE_CHECKING: from vllm.attention import AttentionMetadata @@ -36,6 +36,11 @@ def get_multimodal_embeddings(self, **kwargs) -> Optional[T]: """ Returns multimodal embeddings generated from multimodal kwargs to be merged with text embeddings. + + The output embeddings must be one of the following formats: + - A list or tuple of 2D tensors, where each tensor corresponds to + each input image. + - A single 3D tensor, with the batch dimension grouping the 2D tensors. """ ... @@ -358,6 +363,43 @@ def is_attention_free( return isinstance(model, IsAttentionFree) +@runtime_checkable +class IsHybrid(Protocol): + """The interface required for all models like Jamba that have both + attention and mamba blocks, indicates that + hf_config has 'layers_block_type'""" + + is_hybrid: ClassVar[Literal[True]] = True + """ + A flag that indicates this model has both mamba and attention blocks + , also indicates that the model's hf_config has + 'layers_block_type' """ + + +@runtime_checkable +class _IsHybridType(Protocol): + is_hybrid: ClassVar[Literal[True]] + + +@overload +def is_hybrid(model: object) -> TypeIs[IsHybrid]: + ... + + +@overload +def is_hybrid(model: Type[object]) -> TypeIs[Type[IsHybrid]]: + ... 
+ + +def is_hybrid( + model: Union[Type[object], object] +) -> Union[TypeIs[Type[IsHybrid]], TypeIs[IsHybrid]]: + if isinstance(model, type): + return isinstance(model, _IsHybridType) + + return isinstance(model, IsHybrid) + + @runtime_checkable class SupportsCrossEncoding(Protocol): """The interface required for all models that support cross encoding.""" @@ -389,4 +431,4 @@ def _supports_cross_encoding( def supports_cross_encoding( model: Union[Type[object], object], ) -> Union[TypeIs[Type[SupportsCrossEncoding]], TypeIs[SupportsCrossEncoding]]: - return is_embedding_model(model) and _supports_cross_encoding(model) + return is_pooling_model(model) and _supports_cross_encoding(model) diff --git a/vllm/model_executor/models/interfaces_base.py b/vllm/model_executor/models/interfaces_base.py index 957a5a6e26b5c..de733b6d49a53 100644 --- a/vllm/model_executor/models/interfaces_base.py +++ b/vllm/model_executor/models/interfaces_base.py @@ -141,7 +141,7 @@ def is_text_generation_model( @runtime_checkable -class VllmModelForEmbedding(VllmModel[C_co, T], Protocol[C_co, T]): +class VllmModelForPooling(VllmModel[C_co, T], Protocol[C_co, T]): def pooler( self, @@ -153,23 +153,22 @@ def pooler( @overload -def is_embedding_model( - model: Type[object]) -> TypeIs[Type[VllmModelForEmbedding]]: +def is_pooling_model(model: Type[object]) -> TypeIs[Type[VllmModelForPooling]]: ... @overload -def is_embedding_model(model: object) -> TypeIs[VllmModelForEmbedding]: +def is_pooling_model(model: object) -> TypeIs[VllmModelForPooling]: ... -def is_embedding_model( +def is_pooling_model( model: Union[Type[object], object], -) -> Union[TypeIs[Type[VllmModelForEmbedding]], TypeIs[VllmModelForEmbedding]]: +) -> Union[TypeIs[Type[VllmModelForPooling]], TypeIs[VllmModelForPooling]]: if not is_vllm_model(model): return False if isinstance(model, type): - return isinstance(model, VllmModelForEmbedding) + return isinstance(model, VllmModelForPooling) - return isinstance(model, VllmModelForEmbedding) + return isinstance(model, VllmModelForPooling) diff --git a/vllm/model_executor/models/intern_vit.py b/vllm/model_executor/models/intern_vit.py index c4346fcb3bd2a..7ff68bd60e8ad 100644 --- a/vllm/model_executor/models/intern_vit.py +++ b/vllm/model_executor/models/intern_vit.py @@ -12,7 +12,7 @@ import torch.nn.functional as F from transformers import PretrainedConfig -from vllm.attention.selector import _Backend +from vllm.attention.layer import MultiHeadAttention from vllm.distributed import (divide, get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size, split_tensor_along_last_dim, @@ -25,8 +25,6 @@ from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.model_loader.weight_utils import default_weight_loader -from .utils import get_vit_attn_backend - NORM2FN = { 'rms_norm': RMSNorm, 'layer_norm': nn.LayerNorm, @@ -183,10 +181,8 @@ def __init__( prefix=f"{prefix}.proj", ) - self.attn_backend = get_vit_attn_backend(support_fa=False) - if self.attn_backend not in {_Backend.TORCH_SDPA, _Backend.XFORMERS}: - raise RuntimeError( - f"InternViT does not support {self.attn_backend} backend now.") + self.attn = MultiHeadAttention(self.num_heads_per_partition, + self.head_dim, self.scale) def _apply_qk_norm(self, q: torch.Tensor, k: torch.Tensor): if self.tp_size > 1: @@ -209,23 +205,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: if self.qk_normalization: q, k = self._apply_qk_norm(q, k) - q = q.view(B, N, self.num_heads_per_partition, self.head_dim) - k = k.view(B, N, 
self.num_heads_per_partition, self.head_dim) - v = v.view(B, N, self.num_heads_per_partition, self.head_dim) - - if self.attn_backend == _Backend.XFORMERS: - from xformers import ops as xops - - out = xops.memory_efficient_attention_forward(q, - k, - v, - scale=self.scale) - elif self.attn_backend == _Backend.TORCH_SDPA: - q, k, v = (x.transpose(1, 2) for x in (q, k, v)) - out = F.scaled_dot_product_attention(q, k, v, scale=self.scale) - out = out.transpose(1, 2) - - out = out.view(B, N, -1) + out = self.attn(q, k, v) out, _ = self.proj(out) return out diff --git a/vllm/model_executor/models/internlm2.py b/vllm/model_executor/models/internlm2.py index 906128940ff76..41b9f110d771f 100644 --- a/vllm/model_executor/models/internlm2.py +++ b/vllm/model_executor/models/internlm2.py @@ -27,7 +27,7 @@ from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.sequence import IntermediateTensors -from .interfaces import SupportsPP +from .interfaces import SupportsLoRA, SupportsPP from .utils import (is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) @@ -319,7 +319,21 @@ def forward( return hidden_states -class InternLM2ForCausalLM(nn.Module, SupportsPP): +class InternLM2ForCausalLM(nn.Module, SupportsPP, SupportsLoRA): + packed_modules_mapping = { + "wqkv": ["wqkv"], + "gate_up_proj": ["w1", "w3"], + } + + # LoRA specific attributes + supported_lora_modules = [ + "wqkv", + "wo", + "gate_up_proj", + "w2", + ] + embedding_modules = {} + embedding_padding_modules = [] def __init__(self, *, @@ -329,8 +343,12 @@ def __init__(self, super().__init__() config = vllm_config.model_config.hf_config quant_config = vllm_config.quant_config + lora_config = vllm_config.lora_config + self.config = config self.quant_config = quant_config + self.lora_config = lora_config + self.model = model_type(vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")) self.output = ParallelLMHead(config.vocab_size, diff --git a/vllm/model_executor/models/internvl.py b/vllm/model_executor/models/internvl.py index b1c0065afbf30..f4b7e4478c164 100644 --- a/vllm/model_executor/models/internvl.py +++ b/vllm/model_executor/models/internvl.py @@ -26,7 +26,7 @@ InternVisionPatchModel) from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs -from vllm.multimodal.inputs import NestedTensors +from vllm.multimodal.inputs import NestedTensors, PlaceholderRange from vllm.multimodal.utils import cached_get_tokenizer from vllm.sequence import IntermediateTensors from vllm.utils import is_list_of @@ -52,12 +52,18 @@ class InternVLImagePixelInputs(TypedDict): Shape: `(batch_size * num_images * (1 + num_patches), num_channels, height, width)` """ + patches_per_image: List[int] + """ + List of number of total patches for each image in the batch. + """ class InternVLImageEmbeddingInputs(TypedDict): type: Literal["image_embeds"] - data: torch.Tensor - """Shape: `(batch_size * num_images, image_feature_size, hidden_size)` + data: NestedTensors + """ + A tensor of shape `(num_images, total_image_feature_size, hidden_size)` + or a list of tensors of shape `(total_image_feature_size, hidden_size)` `hidden_size` must match the hidden size of language model backbone. 
""" @@ -349,10 +355,32 @@ def input_processor( new_prompt = self._expand_image_prompt(prompt, image_feature_sizes, num_patches) new_prompt_token_ids = tokenizer.encode(new_prompt) + img_context_token_id = tokenizer.encode(self.img_context_token, + add_special_tokens=False) + assert len(img_context_token_id) == 1, \ + (f"Invalid image token '{self.img_context_token}': A valid image " + f"token encodes to a single token ID, got {img_context_token_id}.") + img_context_token_id = img_context_token_id[0] + + # Get precise tracking of placeholder positions + token_idx = image_idx = 0 + placeholder_ranges = [] + while token_idx < len(new_prompt_token_ids): + if new_prompt_token_ids[token_idx] == img_context_token_id: + curr_image_featue_size = image_feature_sizes[image_idx] + placeholder_ranges.append( + PlaceholderRange(offset=token_idx, + length=curr_image_featue_size)) + image_idx += 1 + token_idx += curr_image_featue_size + else: + token_idx += 1 - return token_inputs(prompt=prompt, - prompt_token_ids=new_prompt_token_ids, - multi_modal_data=multi_modal_data) + return token_inputs( + prompt=prompt, + prompt_token_ids=new_prompt_token_ids, + multi_modal_data=multi_modal_data, + multi_modal_placeholders={"image": placeholder_ranges}) def input_mapper( self, @@ -474,13 +502,15 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: ) self.language_model = init_vllm_registered_model( - config.text_config, vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "language_model")) + hf_config=config.text_config, + prefix=maybe_prefix(prefix, "language_model"), + ) self.mlp1 = self._init_mlp1(config) self.img_context_token_id = None + self.visual_token_mask = None self.make_empty_intermediate_tensors = ( self.language_model.make_empty_intermediate_tensors) @@ -612,35 +642,57 @@ def _parse_and_validate_image_input( if not isinstance(pixel_values, (torch.Tensor, list)): raise ValueError("Incorrect type of pixel values. " f"Got type: {type(pixel_values)}") + + patches_per_image = [] + for request_pixel_values in pixel_values: + for image_pixel_values in request_pixel_values: + patches_per_image.append(image_pixel_values.shape[0]) # We need to flatten (B, N, P) to (B*N*P), # so we call flatten_bn twice. return InternVLImagePixelInputs( type="pixel_values", data=self._validate_pixel_values( flatten_bn(flatten_bn(pixel_values), concat=True)), - ) + patches_per_image=patches_per_image) raise AssertionError("This line should be unreachable.") def _process_image_input( self, image_input: InternVLImageInputs, - ) -> torch.Tensor: + ) -> Tuple[torch.Tensor]: if image_input["type"] == "image_embeds": return image_input["data"] assert self.vision_model is not None + image_embeds = self.extract_feature(image_input["data"]) + patches_per_image = image_input["patches_per_image"] + + # Only one image in the current batch + if len(patches_per_image) == 1: + image_embeds = image_embeds.view( + -1, self.config.text_config.hidden_size).unsqueeze(0) + return image_embeds + + # NOTE: Image embeddings are split into separate tensors for each image + # by the size of each embedding. 
+        feature_size = image_embeds.shape[1]
+        image_embeds = image_embeds.view(-1,
+                                         self.config.text_config.hidden_size)
+        image_feature_sizes = [
+            num_patches * feature_size for num_patches in patches_per_image
+        ]
+        image_embeds = image_embeds.split(image_feature_sizes)
         return image_embeds
 
-    def _get_visual_token_mask(self, input_ids: torch.Tensor) -> torch.Tensor:
+    def _set_visual_token_mask(self, input_ids: torch.Tensor) -> None:
         if self.is_mono:
-            visual_token_mask = (
+            self.visual_token_mask = (
                 input_ids == self.img_context_token_id).reshape(-1, 1)
         else:
-            visual_token_mask = None
-        return visual_token_mask
+            self.visual_token_mask = None
 
     def get_multimodal_embeddings(self, **kwargs) -> Optional[NestedTensors]:
         image_input = self._parse_and_validate_image_input(**kwargs)
@@ -657,6 +709,7 @@ def get_input_embeddings(
         inputs_embeds = self.language_model.get_input_embeddings(input_ids)
         if multimodal_embeddings is not None:
             assert self.img_context_token_id is not None
+            self._set_visual_token_mask(input_ids)
             inputs_embeds = merge_multimodal_embeddings(
                 input_ids, inputs_embeds, multimodal_embeddings,
                 self.img_context_token_id)
@@ -673,7 +726,6 @@ def forward(
         **kwargs: object,
     ) -> Union[SamplerOutput, IntermediateTensors]:
 
-        visual_token_mask = None
         if intermediate_tensors is not None:
             input_ids = None
             inputs_embeds = None
@@ -694,15 +746,12 @@ def forward(
                 "intermediate_tensors": intermediate_tensors,
                 "inputs_embeds": inputs_embeds,
             }
-            if self.img_context_token_id is not None:
-                visual_token_mask = self._get_visual_token_mask(input_ids)
-                # We always overwrite it back to None after computing visual
-                # token mask so that this doesn't need to depend on encoder
-                # output
-                self.img_context_token_id = None
-
-            if self.is_mono:
-                forward_kwargs.update({"visual_token_mask": visual_token_mask})
+            # Only required if the model is mono-architecture
+            if self.visual_token_mask is not None:
+                forward_kwargs.update(
+                    {"visual_token_mask": self.visual_token_mask})
+                self.visual_token_mask = None
 
         hidden_states = self.language_model.model(**forward_kwargs)
         return hidden_states
 
diff --git a/vllm/model_executor/models/jamba.py b/vllm/model_executor/models/jamba.py
index 099ca7e12b288..831db2ae52d74 100644
--- a/vllm/model_executor/models/jamba.py
+++ b/vllm/model_executor/models/jamba.py
@@ -9,6 +9,7 @@
 from vllm.attention.layer import Attention
 from vllm.config import CacheConfig, VllmConfig
 from vllm.distributed import get_tensor_model_parallel_world_size
+from vllm.distributed.parallel_state import get_pp_group
 from vllm.model_executor.layers.fused_moe import FusedMoE
 from vllm.model_executor.layers.layernorm import RMSNorm
 from vllm.model_executor.layers.linear import (QKVParallelLinear,
@@ -25,11 +26,12 @@ MambaCacheParams)
 from vllm.model_executor.sampling_metadata import SamplingMetadata
 from vllm.sequence import IntermediateTensors
-from vllm.worker.model_runner import (_BATCH_SIZES_TO_CAPTURE,
-                                      _get_graph_batch_size)
+from vllm.utils import LayerBlockType
 
-from .interfaces import HasInnerState, SupportsLoRA
-from .utils import maybe_prefix
+from .interfaces import HasInnerState, IsHybrid, SupportsLoRA, SupportsPP
+from .utils import (is_pp_missing_parameter,
+                    make_empty_intermediate_tensors_factory, make_layers,
+                    maybe_prefix)
 
 KVCache = Tuple[torch.Tensor, torch.Tensor]
 
@@ -283,16 +285,24 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
             org_num_embeddings=config.vocab_size,
         )
 
-        decoder_layers = []
-        for i in range(config.num_hidden_layers):
-            layer_class = 
ALL_DECODER_LAYER_TYPES[config.layers_block_type[i]] - decoder_layers.append( - layer_class(config, - layer_idx=i, - cache_config=cache_config, - quant_config=quant_config, - prefix=f"{prefix}.layers.{i}")) - self.layers = nn.ModuleList(decoder_layers) + def get_layer(prefix: str): + layer_idx = int(prefix.rsplit(".", 1)[1]) + layer_class = ALL_DECODER_LAYER_TYPES[ + config.layers_block_type[layer_idx]] + return layer_class( + config, + layer_idx, + cache_config, + quant_config=quant_config, + prefix=prefix, + ) + + self.start_layer, self.end_layer, self.layers = make_layers( + config.num_hidden_layers, get_layer, prefix=f"{prefix}.layers") + self.make_empty_intermediate_tensors = ( + make_empty_intermediate_tensors_factory( + ["hidden_states", "residual"], config.hidden_size)) + self.final_layernorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) @@ -306,26 +316,34 @@ def forward( kv_caches: List[torch.Tensor], attn_metadata: AttentionMetadata, mamba_cache_params: MambaCacheParams, + intermediate_tensors: Optional[IntermediateTensors] = None, inputs_embeds: Optional[torch.Tensor] = None, ) -> torch.Tensor: - if inputs_embeds is not None: - hidden_states = inputs_embeds + if get_pp_group().is_first_rank: + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) + residual = None else: - hidden_states = self.get_input_embeddings(input_ids) - residual = None - for i in range(len(self.layers)): + assert intermediate_tensors is not None + hidden_states = intermediate_tensors["hidden_states"] + residual = intermediate_tensors["residual"] + + kv_cache_index = 0 + mamba_cache_index = 0 + for i in range(self.start_layer, self.end_layer): layer = self.layers[i] kv_cache = None layer_mamba_cache_params = None if isinstance(layer, JambaAttentionDecoderLayer): - kv_cache = kv_caches[(i - self.config.attn_layer_offset) // - self.config.attn_layer_period] + kv_cache = kv_caches[kv_cache_index] + kv_cache_index += 1 if isinstance(layer, JambaMambaDecoderLayer): - current_state_layer = i - (1 + - (i - self.config.attn_layer_offset) - // self.config.attn_layer_period) + current_state_layer = mamba_cache_index layer_mamba_cache_params = mamba_cache_params.at_layer_idx( current_state_layer) + mamba_cache_index += 1 hidden_states, residual = layer( positions=positions, @@ -334,11 +352,17 @@ def forward( attn_metadata=attn_metadata, residual=residual, mamba_cache_params=layer_mamba_cache_params) + if not get_pp_group().is_last_rank: + return IntermediateTensors({ + "hidden_states": hidden_states, + "residual": residual + }) hidden_states, _ = self.final_layernorm(hidden_states, residual) return hidden_states -class JambaForCausalLM(nn.Module, HasInnerState, SupportsLoRA): +class JambaForCausalLM(nn.Module, HasInnerState, SupportsLoRA, SupportsPP, + IsHybrid): packed_modules_mapping = { "qkv_proj": [ "q_proj", @@ -370,6 +394,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() self.config = config + self.vllm_config = vllm_config + self.model_config = vllm_config.model_config self.scheduler_config = scheduler_config self.model = JambaModel(vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")) @@ -392,6 +418,20 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config.vocab_size) self.sampler = get_sampler() + self.make_empty_intermediate_tensors = ( + self.model.make_empty_intermediate_tensors) + if self.scheduler_config is not None and \ + not 
self.model_config.enforce_eager: + if self.scheduler_config.max_num_seqs > \ + vllm_config.compilation_config.max_capture_size: + self.max_batch_size = \ + vllm_config.compilation_config.max_capture_size + else: + self.max_batch_size = vllm_config.pad_for_cudagraph( + self.scheduler_config.max_num_seqs) + else: + self.max_batch_size = 8192 + 2 + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: return self.model.get_input_embeddings(input_ids) @@ -404,17 +444,12 @@ def forward(self, inputs_embeds: Optional[torch.Tensor] = None, **kwargs): if self.mamba_cache is None: - max_batch_size = (_get_graph_batch_size( - self.scheduler_config.max_num_seqs) if self.scheduler_config - else max(_BATCH_SIZES_TO_CAPTURE) + 2) - - layers_type = self.config.layers_block_type - num_mamba_layers = sum( - [layer_type == "mamba" for layer_type in layers_type]) + num_mamba_layers = self.model_config.get_num_layers_by_block_type( + self.vllm_config.parallel_config, LayerBlockType.mamba) self.mamba_cache = MambaCacheManager( - self.lm_head.weight.dtype, num_mamba_layers, max_batch_size, - *self._get_mamba_cache_shape()) + self.lm_head.weight.dtype, num_mamba_layers, + self.max_batch_size, *self._get_mamba_cache_shape()) ( mamba_cache_tensors, state_indices_tensor, @@ -425,7 +460,7 @@ def forward(self, state_indices_tensor) hidden_states = self.model(input_ids, positions, kv_caches, attn_metadata, mamba_cache_params, - inputs_embeds) + intermediate_tensors, inputs_embeds) return hidden_states def copy_inputs_before_cuda_graphs(self, input_buffers, **kwargs): @@ -506,8 +541,12 @@ def load_weights(self, weights: Iterable[Tuple[str, continue name = name.replace(weight_name, param_name) # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: continue + # Skip layers on other devices. + if is_pp_missing_parameter(name, self): + continue param = params_dict[name] weight_loader = param.weight_loader weight_loader(param, loaded_weight, shard_id) @@ -522,6 +561,8 @@ def load_weights(self, weights: Iterable[Tuple[str, if weight_name not in name: continue + if is_pp_missing_parameter(name, self): + continue name = name.replace(weight_name, param_name) param = params_dict[name] weight_loader = param.weight_loader @@ -535,6 +576,8 @@ def load_weights(self, weights: Iterable[Tuple[str, # Skip loading extra bias for GPTQ models. 
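+                    # Some quantized (e.g. GPTQ) checkpoints contain bias
+                    # tensors with no counterpart in this model's parameters;
+                    # they are skipped rather than treated as load errors.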
if name.endswith(".bias") and name not in params_dict: continue + if is_pp_missing_parameter(name, self): + continue param = params_dict[name] weight_loader = getattr(param, "weight_loader", diff --git a/vllm/model_executor/models/llama.py b/vllm/model_executor/models/llama.py index fffb3fe53b94c..2902e6999c2fd 100644 --- a/vllm/model_executor/models/llama.py +++ b/vllm/model_executor/models/llama.py @@ -37,7 +37,6 @@ QKVParallelLinear, RowParallelLinear) from vllm.model_executor.layers.logits_processor import LogitsProcessor -from vllm.model_executor.layers.pooler import Pooler, PoolingType from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.quantization.compressed_tensors.utils import ( get_compressed_tensors_cache_scale) @@ -47,13 +46,12 @@ DEFAULT_VOCAB_PADDING_SIZE, ParallelLMHead, VocabParallelEmbedding) from vllm.model_executor.model_loader.weight_utils import ( default_weight_loader, kv_cache_scales_loader, maybe_remap_kv_scale_name) -from vllm.model_executor.pooling_metadata import PoolingMetadata from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.platforms import current_platform -from vllm.sequence import IntermediateTensors, PoolerOutput +from vllm.sequence import IntermediateTensors from .interfaces import SupportsLoRA, SupportsPP -from .utils import (AutoWeightsLoader, PPMissingLayer, WeightsMapper, +from .utils import (AutoWeightsLoader, PPMissingLayer, extract_layer_index, is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) @@ -114,6 +112,7 @@ def __init__( prefix: str = "", ) -> None: super().__init__() + layer_idx = extract_layer_index(prefix) self.hidden_size = hidden_size tp_size = get_tensor_model_parallel_world_size() self.total_num_heads = num_heads @@ -168,6 +167,20 @@ def __init__( rope_scaling=rope_scaling, is_neox_style=is_neox_style, ) + + if hasattr(config, "interleaved_sliding_window"): + interleaved_sliding_window = config.interleaved_sliding_window + if isinstance(interleaved_sliding_window, int): + sliding_window = interleaved_sliding_window + elif isinstance(interleaved_sliding_window, list): + sw_idx = layer_idx % len(interleaved_sliding_window) + sliding_window = interleaved_sliding_window[sw_idx] + else: + raise ValueError( + f"{type(interleaved_sliding_window)} is not supported.") + else: + sliding_window = None + self.attn = Attention( self.num_heads, self.head_dim, @@ -175,6 +188,7 @@ def __init__( num_kv_heads=self.num_kv_heads, cache_config=cache_config, quant_config=quant_config, + per_layer_sliding_window=sliding_window, prefix=f"{prefix}.attn", ) @@ -497,11 +511,12 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config = vllm_config.model_config.hf_config quant_config = vllm_config.quant_config lora_config = vllm_config.lora_config - pooler_config = vllm_config.model_config.pooler_config self.config = config self.lora_config = lora_config - self.model = self._init_model(vllm_config=vllm_config, prefix=prefix) + self.model = self._init_model(vllm_config=vllm_config, + prefix=maybe_prefix(prefix, "model")) + if get_pp_group().is_last_rank: self.unpadded_vocab_size = config.vocab_size if lora_config: @@ -527,16 +542,13 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, config.vocab_size, logit_scale) - self.sampler = get_sampler() else: self.lm_head = PPMissingLayer() + + self.sampler = get_sampler() + 
self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) - self._pooler = Pooler.from_config_with_defaults( - pooler_config, - pooling_type=PoolingType.STEP, - normalize=False, - softmax=False) def _init_model(self, vllm_config: VllmConfig, prefix: str = ""): return LlamaModel(vllm_config=vllm_config, prefix=prefix) @@ -567,14 +579,6 @@ def compute_logits( sampling_metadata) return logits - def pooler( - self, - hidden_states: torch.Tensor, - pooling_metadata: PoolingMetadata, - ) -> Optional[PoolerOutput]: - logits = self.compute_logits(hidden_states, None) - return self._pooler(logits, pooling_metadata) - def sample(self, logits: torch.Tensor, sampling_metadata: SamplingMetadata) -> Optional[SamplerOutput]: next_tokens = self.sampler(logits, sampling_metadata) @@ -625,76 +629,3 @@ def permute(w: torch.Tensor, n_heads: int): name = name.replace(item, mapping[item]) return name, loaded_weight - - -class LlamaEmbeddingModel(nn.Module, SupportsLoRA, SupportsPP): - """ - A model that uses Llama with additional embedding functionalities. - - This class encapsulates the LlamaModel and provides an interface for - embedding operations and customized pooling functions. - - Attributes: - model: An instance of LlamaModel used for forward operations. - _pooler: An instance of Pooler used for pooling operations. - """ - packed_modules_mapping = { - "qkv_proj": ["q_proj", "k_proj", "v_proj"], - "gate_up_proj": ["gate_proj", "up_proj"] - } - - # LoRA specific attributes - supported_lora_modules = [ - "qkv_proj", "o_proj", "gate_up_proj", "down_proj", "embed_tokens" - ] - embedding_modules = { - "embed_tokens": "input_embeddings", - } - embedding_padding_modules = [] - - def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): - super().__init__() - - pooler_config = vllm_config.model_config.pooler_config - - self.model = LlamaModel(vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "model")) - self._pooler = Pooler.from_config_with_defaults( - pooler_config, - pooling_type=PoolingType.LAST, - normalize=True, - softmax=False) - self.make_empty_intermediate_tensors = ( - self.model.make_empty_intermediate_tensors) - - def forward( - self, - input_ids: Optional[torch.Tensor], - positions: torch.Tensor, - kv_caches: List[torch.Tensor], - attn_metadata: AttentionMetadata, - intermediate_tensors: Optional[IntermediateTensors] = None, - inputs_embeds: Optional[torch.Tensor] = None, - ) -> Union[torch.Tensor, IntermediateTensors]: - return self.model(input_ids, positions, kv_caches, attn_metadata, - intermediate_tensors, inputs_embeds) - - def pooler( - self, - hidden_states: torch.Tensor, - pooling_metadata: PoolingMetadata, - ) -> Optional[PoolerOutput]: - return self._pooler(hidden_states, pooling_metadata) - - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): - hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={"model.": ""}) - weights = hf_to_vllm_mapper.apply(weights) - self.model.load_weights(weights) - - def load_kv_cache_scales(self, quantization_param_path: str) -> None: - self.model.load_kv_cache_scales(quantization_param_path) - - # LRUCacheWorkerLoRAManager instantiation requires model config. 
- @property - def config(self): - return self.model.config diff --git a/vllm/model_executor/models/llava.py b/vllm/model_executor/models/llava.py index e7757b3c7d405..a2e404cf43238 100644 --- a/vllm/model_executor/models/llava.py +++ b/vllm/model_executor/models/llava.py @@ -1,37 +1,40 @@ from functools import cached_property +from types import MethodType from typing import (Iterable, List, Literal, Mapping, Optional, Protocol, Set, Tuple, TypedDict, Union) import torch import torch.nn as nn -from PIL import Image -from transformers import (CLIPVisionConfig, LlavaConfig, PixtralVisionConfig, - PretrainedConfig, SiglipVisionConfig) +from transformers import (BatchFeature, CLIPVisionConfig, LlavaConfig, + PixtralVisionConfig, PretrainedConfig, + ProcessorMixin, SiglipVisionConfig) +from transformers.models.llava import LlavaProcessor +from transformers.models.pixtral import PixtralProcessor from vllm.attention import AttentionMetadata from vllm.config import VllmConfig -from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, DummyData, - InputContext) +from vllm.inputs import InputContext from vllm.model_executor.layers.activation import get_act_fn +from vllm.model_executor.layers.linear import (ColumnParallelLinear, + RowParallelLinear) from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY from vllm.multimodal.inputs import NestedTensors +from vllm.multimodal.processing import (BaseMultiModalProcessor, + MultiModalDataItems, ProcessorInputs, + PromptReplacement) from vllm.sequence import IntermediateTensors -from vllm.utils import is_list_of from .clip import (CLIPVisionModel, dummy_image_for_clip, - dummy_seq_data_for_clip, get_max_clip_image_tokens, - input_processor_for_clip) + get_max_clip_image_tokens) from .interfaces import SupportsMultiModal, SupportsPP from .pixtral import (PixtralHFVisionModel, dummy_image_for_pixtral_hf, - dummy_seq_data_for_pixtral_hf, get_max_pixtral_hf_image_tokens, - input_processor_for_pixtral_hf) + get_pixtral_hf_image_feature_size) from .siglip import (SiglipVisionModel, dummy_image_for_siglip, - dummy_seq_data_for_siglip, get_max_siglip_image_tokens, - input_processor_for_siglip) + get_max_siglip_image_tokens) from .utils import (AutoWeightsLoader, flatten_bn, init_vllm_registered_model, maybe_prefix, merge_multimodal_embeddings) @@ -59,25 +62,32 @@ class LlavaImageEmbeddingInputs(TypedDict): LlavaImageInputs = Union[LlavaImagePixelInputs, LlavaImageEmbeddingInputs] -# TODO(xwjiang): Run benchmark and decide if TP. 
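+# The projector below is tensor-parallel: linear_1 (column-parallel) shards
+# its output dimension across ranks, the activation is applied elementwise on
+# each local shard, and linear_2 (row-parallel) all-reduces its output, so no
+# intermediate gather is required between the two layers.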
class LlavaMultiModalProjector(nn.Module): - def __init__(self, vision_hidden_size: int, text_hidden_size: int, - projector_hidden_act: str): + def __init__(self, + vision_hidden_size: int, + text_hidden_size: int, + projector_hidden_act: str, + quant_config: Optional[QuantizationConfig] = None, + prefix: str = ""): super().__init__() - self.linear_1 = nn.Linear(vision_hidden_size, - text_hidden_size, - bias=True) + self.linear_1 = ColumnParallelLinear(vision_hidden_size, + text_hidden_size, + bias=True, + quant_config=quant_config, + prefix=f"{prefix}.linear_1") self.act = get_act_fn(projector_hidden_act) - self.linear_2 = nn.Linear(text_hidden_size, - text_hidden_size, - bias=True) + self.linear_2 = RowParallelLinear(text_hidden_size, + text_hidden_size, + bias=True, + quant_config=quant_config, + prefix=f"{prefix}.linear_2") def forward(self, image_features: torch.Tensor) -> torch.Tensor: - hidden_states = self.linear_1(image_features) + hidden_states, _ = self.linear_1(image_features) hidden_states = self.act(hidden_states) - hidden_states = self.linear_2(hidden_states) + hidden_states, _ = self.linear_2(hidden_states) return hidden_states @@ -104,102 +114,112 @@ def get_max_llava_image_tokens(ctx: InputContext): raise ValueError(f"Unexpected select feature strategy: {strategy}") -def dummy_data_for_llava(ctx: InputContext, seq_len: int, - mm_counts: Mapping[str, int]): - hf_config = ctx.get_hf_config(LlavaConfig) - vision_config = hf_config.vision_config - num_images = mm_counts["image"] +class LlavaMultiModalProcessor(BaseMultiModalProcessor): - image_feature_size = get_max_llava_image_tokens(ctx) + def _patch_pixtral_processor(self, hf_processor: PixtralProcessor): + if getattr(hf_processor, "__is_patched__", False): + return # Already patched - if isinstance(vision_config, CLIPVisionConfig): - seq_data, ranges = dummy_seq_data_for_clip( - vision_config, - seq_len, - num_images, - image_token_id=hf_config.image_token_index, - image_feature_size_override=image_feature_size, - ) + image_processor = hf_processor.image_processor # type: ignore + orig_preprocess = image_processor.preprocess - mm_data = dummy_image_for_clip(vision_config, num_images) - return DummyData(seq_data, mm_data, ranges) - elif isinstance(vision_config, SiglipVisionConfig): - seq_data, ranges = dummy_seq_data_for_siglip( - vision_config, - seq_len, - num_images, - image_token_id=hf_config.image_token_index, - image_feature_size_override=image_feature_size, - ) + def preprocess(__self, *args, **kwargs): + hf_inputs = orig_preprocess(*args, **kwargs) + hf_inputs["is_pixtral"] = torch.tensor(True) + return hf_inputs - mm_data = dummy_image_for_siglip(vision_config, num_images) - return DummyData(seq_data, mm_data, ranges) - elif isinstance(vision_config, PixtralVisionConfig): - seq_data, ranges = dummy_seq_data_for_pixtral_hf( - vision_config, - seq_len, - num_images, - image_token_id=hf_config.image_token_index, - image_feature_size_override=image_feature_size, - ) + image_processor.preprocess = MethodType(preprocess, image_processor) - mm_data = dummy_image_for_pixtral_hf(vision_config, num_images) - return DummyData(seq_data, mm_data, ranges) + hf_processor.__is_patched__ = True # type: ignore - msg = f"Unsupported vision config: {type(vision_config)}" - raise NotImplementedError(msg) + def _get_hf_processor(self) -> Union[LlavaProcessor, PixtralProcessor]: + hf_processor = self.ctx.get_hf_processor() + assert isinstance(hf_processor, (LlavaProcessor, PixtralProcessor)) + if isinstance(hf_processor, 
PixtralProcessor): + self._patch_pixtral_processor(hf_processor) -def input_processor_for_llava(ctx: InputContext, inputs: DecoderOnlyInputs): - multi_modal_data = inputs.get("multi_modal_data") - if multi_modal_data is None or "image" not in multi_modal_data: - return inputs + return hf_processor - model_config = ctx.model_config - hf_config = ctx.get_hf_config(LlavaConfig) - vision_config = hf_config.vision_config + def _get_prompt_replacements( + self, + mm_items: MultiModalDataItems, + hf_inputs: BatchFeature, + mm_processor_kwargs: Mapping[str, object], + ) -> list[PromptReplacement]: + hf_config = self.ctx.get_hf_config(LlavaConfig) + image_token_id = hf_config.image_token_index + + processor = self._get_hf_processor() + if isinstance(processor, PixtralProcessor): + image_token = processor.image_token + image_break_token = processor.image_break_token + image_end_token = processor.image_end_token + + vision_config = hf_config.vision_config + assert isinstance(vision_config, PixtralVisionConfig) + + def get_replacement_pixtral(item_idx: int): + image_size = mm_items.get_image_size(item_idx) + ( + num_width_tokens, + num_height_tokens, + ) = get_pixtral_hf_image_feature_size( + vision_config, + image_width=image_size.width, + image_height=image_size.height, + ) - image_data = multi_modal_data["image"] - if isinstance(image_data, Image.Image): - image_feature_size = get_max_llava_image_tokens(ctx) - elif is_list_of(image_data, Image.Image): - image_feature_size = [get_max_llava_image_tokens(ctx) - ] * len(image_data) - elif isinstance(image_data, torch.Tensor): - num_images, image_feature_size, hidden_size = image_data.shape - elif is_list_of(image_data, torch.Tensor): - image_feature_size = [item.shape[1] for item in image_data] - else: - raise TypeError(f"Invalid image type: {type(image_data)}") + tokens = ([image_token] * num_width_tokens + + [image_break_token]) * num_height_tokens + tokens[-1] = image_end_token - if isinstance(vision_config, CLIPVisionConfig): - return input_processor_for_clip( - model_config, - vision_config, - inputs, - image_token_id=hf_config.image_token_index, - image_feature_size_override=image_feature_size, - ) - elif isinstance(vision_config, SiglipVisionConfig): - return input_processor_for_siglip( - model_config, - vision_config, - inputs, - image_token_id=hf_config.image_token_index, - image_feature_size_override=image_feature_size, - ) - elif isinstance(vision_config, PixtralVisionConfig): - # We ignore image_feature_size_override since we have non-uniform - # image sizes for Pixtral - return input_processor_for_pixtral_hf( - model_config, - vision_config, - inputs, - image_token_id=hf_config.image_token_index, - ) + return "".join(tokens) - msg = f"Unsupported vision config: {type(vision_config)}" - raise NotImplementedError(msg) + return [ + PromptReplacement( + modality="image", + target=[image_token_id], + replacement=get_replacement_pixtral, + ), + ] + + max_image_tokens = get_max_llava_image_tokens(self.ctx) + + return [ + PromptReplacement( + modality="image", + target=[image_token_id], + replacement=[image_token_id] * max_image_tokens, + ) + ] + + def _get_dummy_mm_inputs( + self, + mm_counts: Mapping[str, int], + ) -> ProcessorInputs: + hf_config = self.ctx.get_hf_config(LlavaConfig) + vision_config = hf_config.vision_config + num_images = mm_counts["image"] + + if isinstance(vision_config, CLIPVisionConfig): + data = dummy_image_for_clip(vision_config, num_images) + elif isinstance(vision_config, SiglipVisionConfig): + data = 
dummy_image_for_siglip(vision_config, num_images) + elif isinstance(vision_config, PixtralVisionConfig): + data = dummy_image_for_pixtral_hf(vision_config, num_images) + else: + msg = f"Unsupported vision config: {type(vision_config)}" + raise NotImplementedError(msg) + + hf_processor = self._get_hf_processor() + image_token = hf_processor.image_token + + return ProcessorInputs( + prompt_text=image_token * num_images, + mm_data=data, + mm_processor_kwargs={}, + ) class LlavaLikeConfig(Protocol): @@ -282,11 +302,18 @@ def init_vision_tower_for_llava( raise NotImplementedError(msg) -@MULTIMODAL_REGISTRY.register_image_input_mapper() @MULTIMODAL_REGISTRY.register_max_image_tokens(get_max_llava_image_tokens) -@INPUT_REGISTRY.register_dummy_data(dummy_data_for_llava) -@INPUT_REGISTRY.register_input_processor(input_processor_for_llava) +@MULTIMODAL_REGISTRY.register_processor(LlavaMultiModalProcessor) class LlavaForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP): + # BitsAndBytes specific attributes + bitsandbytes_stacked_params_mapping = { + # shard_name, weight_name, index + "q_proj": ("qkv_proj", 0), + "k_proj": ("qkv_proj", 1), + "v_proj": ("qkv_proj", 2), + "gate_proj": ("gate_up_proj", 0), + "up_proj": ("gate_up_proj", 1), + } def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: super().__init__() @@ -316,12 +343,15 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: self.multi_modal_projector = LlavaMultiModalProjector( vision_hidden_size=config.vision_config.hidden_size, text_hidden_size=config.text_config.hidden_size, - projector_hidden_act=config.projector_hidden_act) + projector_hidden_act=config.projector_hidden_act, + quant_config=quant_config, + prefix=maybe_prefix(prefix, "multi_modal_projector")) self.language_model = init_vllm_registered_model( - config.text_config, vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "language_model")) + hf_config=config.text_config, + prefix=maybe_prefix(prefix, "language_model"), + ) self.make_empty_intermediate_tensors = ( self.language_model.make_empty_intermediate_tensors) @@ -346,38 +376,10 @@ def _validate_pixel_values(self, data: torch.Tensor) -> torch.Tensor: return data - def _validate_image_sizes(self, images: List[torch.Tensor], - sizes: List[torch.Tensor]) -> List[torch.Tensor]: - if not isinstance(sizes, list): - sizes = [sizes] - - total_images = sum(size.numel() // 2 for size in sizes) - if total_images != len(images): - raise ValueError("Mismatch in number of images. " - f"Expected {total_images}, got {len(images)}") - img_idx = 0 - for size in sizes: - # Flatten the size tensor to a list of (height, width) pairs - size = size.view(-1, 2).tolist() - for expected_h, expected_w in size: - if img_idx >= len(images): - raise ValueError("Ran out of images before sizes. " - f"{img_idx} >= {len(images)}") - img = images[img_idx] - if img.shape[-2:] != (expected_h, expected_w): - raise ValueError( - "Image size mismatch. Expected " - f"{(expected_h, expected_w)}, got {img.shape[-2:]}") - if img.shape[-3] != 3: - raise ValueError("Image channel mismatch. 
Expected 3, " - f"got {img.shape[-3]}") - img_idx += 1 - return images - def _parse_and_validate_image_input( self, **kwargs: object) -> Optional[LlavaImageInputs]: pixel_values = kwargs.pop("pixel_values", None) - image_sizes = kwargs.pop("image_sizes", None) + is_pixtral = kwargs.pop("is_pixtral", torch.tensor([False])) image_embeds = kwargs.pop("image_embeds", None) if pixel_values is None and image_embeds is None: @@ -388,9 +390,8 @@ def _parse_and_validate_image_input( raise ValueError("Incorrect type of pixel values. " f"Got type: {type(pixel_values)}") - # Case for models like PixtralHF that have dynamic image sizes - # so we need to produce a list of tensors - if image_sizes is not None: + assert isinstance(is_pixtral, torch.Tensor) + if is_pixtral.any(): images = pixel_values def flatten_to_3d_tensors(item): @@ -413,7 +414,7 @@ def flatten_to_3d_tensors(item): return LlavaImagePixelInputs( type="pixel_values", - data=self._validate_image_sizes(images, image_sizes), + data=images, ) return LlavaImagePixelInputs( @@ -581,3 +582,28 @@ def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self) return loader.load_weights(weights) + + +class MantisMultiModalProcessor(LlavaMultiModalProcessor): + + def _get_hf_processor(self) -> ProcessorMixin: + try: + from mantis.models.mllava import MLlavaProcessor + except ModuleNotFoundError as exc: + raise ModuleNotFoundError( + "You need to `pip install " + "git+https://github.com/TIGER-AI-Lab/Mantis.git` " + "to use this model") from exc + + processor = MLlavaProcessor.from_pretrained( + self.ctx.model_config.tokenizer) + assert isinstance(processor, ProcessorMixin) + return processor + + +# To use this model, please use +# `--hf_overrides '{"architectures": ["MantisForConditionalGeneration"]}'` +@MULTIMODAL_REGISTRY.register_max_image_tokens(get_max_llava_image_tokens) +@MULTIMODAL_REGISTRY.register_processor(MantisMultiModalProcessor) +class MantisForConditionalGeneration(LlavaForConditionalGeneration): + pass diff --git a/vllm/model_executor/models/llava_next.py b/vllm/model_executor/models/llava_next.py index e113f5862830d..a39f2f4124d05 100644 --- a/vllm/model_executor/models/llava_next.py +++ b/vllm/model_executor/models/llava_next.py @@ -14,13 +14,11 @@ from vllm.config import VllmConfig from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, DummyData, InputContext) -from vllm.model_executor.layers.pooler import Pooler, PoolingType from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler -from vllm.model_executor.pooling_metadata import PoolingMetadata from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY from vllm.multimodal.inputs import NestedTensors -from vllm.sequence import IntermediateTensors, PoolerOutput +from vllm.sequence import IntermediateTensors from vllm.utils import is_list_of from .clip import (CLIPVisionModel, dummy_image_for_clip, @@ -286,7 +284,6 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: super().__init__() config = vllm_config.model_config.hf_config quant_config = vllm_config.quant_config - pooler_config = vllm_config.model_config.pooler_config multimodal_config = vllm_config.model_config.multimodal_config vision_feature_layer = config.vision_feature_layer @@ -321,17 +318,11 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: projector_hidden_act=config.projector_hidden_act) self.language_model = 
init_vllm_registered_model( - config.text_config, vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "language_model")) - - # The same model class supports both language generation and embedding - # because the architecture name is the same - self._pooler = Pooler.from_config_with_defaults( - pooler_config, - pooling_type=PoolingType.LAST, - normalize=True, - softmax=False) + hf_config=config.text_config, + prefix=maybe_prefix(prefix, "language_model"), + ) + self.make_empty_intermediate_tensors = ( self.language_model.make_empty_intermediate_tensors) @@ -678,13 +669,6 @@ def sample( ) -> Optional[SamplerOutput]: return self.language_model.sample(logits, sampling_metadata) - def pooler( - self, - hidden_states: torch.Tensor, - pooling_metadata: PoolingMetadata, - ) -> Optional[PoolerOutput]: - return self._pooler(hidden_states, pooling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]) -> Set[str]: loader = AutoWeightsLoader(self) diff --git a/vllm/model_executor/models/llava_next_video.py b/vllm/model_executor/models/llava_next_video.py index b130791808924..0de9d8c5ea572 100644 --- a/vllm/model_executor/models/llava_next_video.py +++ b/vllm/model_executor/models/llava_next_video.py @@ -275,9 +275,10 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: text_hidden_size=config.text_config.hidden_size, projector_hidden_act=config.projector_hidden_act) self.language_model = init_vllm_registered_model( - config.text_config, vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "language_model")) + hf_config=config.text_config, + prefix=maybe_prefix(prefix, "language_model"), + ) self.make_empty_intermediate_tensors = ( self.language_model.model.make_empty_intermediate_tensors) diff --git a/vllm/model_executor/models/llava_onevision.py b/vllm/model_executor/models/llava_onevision.py index 3166737d61582..0bebc1c745e2b 100644 --- a/vllm/model_executor/models/llava_onevision.py +++ b/vllm/model_executor/models/llava_onevision.py @@ -422,9 +422,10 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: prefix=maybe_prefix(prefix, "vision_tower")) self.multi_modal_projector = LlavaOnevisionMultiModalProjector(config) self.language_model = init_vllm_registered_model( - config.text_config, vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "language_model")) + hf_config=config.text_config, + prefix=maybe_prefix(prefix, "language_model"), + ) self.image_newline = nn.Parameter( torch.empty(config.text_config.hidden_size)) diff --git a/vllm/model_executor/models/mamba.py b/vllm/model_executor/models/mamba.py index ac0d265a961f0..06c8d9723cd01 100644 --- a/vllm/model_executor/models/mamba.py +++ b/vllm/model_executor/models/mamba.py @@ -1,5 +1,5 @@ """PyTorch MAMBA model.""" -from typing import Iterable, List, Optional, Tuple +from typing import Iterable, List, Optional, Set, Tuple import torch from torch import nn @@ -8,6 +8,7 @@ from vllm.attention.backends.abstract import AttentionMetadata from vllm.config import CacheConfig, VllmConfig from vllm.distributed import get_tensor_model_parallel_world_size +from vllm.distributed.parallel_state import get_pp_group from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.logits_processor import LogitsProcessor from vllm.model_executor.layers.mamba.mamba_mixer import MambaMixer @@ -18,15 +19,16 @@ DEFAULT_VOCAB_PADDING_SIZE, ParallelLMHead, VocabParallelEmbedding) from vllm.model_executor.model_loader.weight_utils import default_weight_loader from 
vllm.model_executor.models.interfaces import (HasInnerState, - IsAttentionFree) + IsAttentionFree, SupportsPP) from vllm.model_executor.models.mamba_cache import (MambaCacheManager, MambaCacheParams) from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.sequence import IntermediateTensors -from vllm.worker.model_runner import (_BATCH_SIZES_TO_CAPTURE, - _get_graph_batch_size) +from vllm.utils import LayerBlockType -from .utils import maybe_prefix +from .utils import (is_pp_missing_parameter, + make_empty_intermediate_tensors_factory, make_layers, + maybe_prefix) KVCache = Tuple[torch.Tensor, torch.Tensor] @@ -49,6 +51,7 @@ def __init__(self, use_conv_bias=config.use_conv_bias, use_bias=config.use_bias, use_rms_norm=self.is_falcon_mamba, + rms_norm_has_weight=not self.is_falcon_mamba, rms_norm_eps=mixer_rms_eps, activation=config.hidden_act) @@ -96,15 +99,17 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): org_num_embeddings=config.vocab_size, ) - decoder_layers = [] - for i in range(config.num_hidden_layers): - decoder_layers.append( - MambaDecoderLayer(config, - cache_config=cache_config, - quant_config=quant_config)) - self.layers = nn.ModuleList(decoder_layers) + self.start_layer, self.end_layer, self.layers = make_layers( + config.num_hidden_layers, + lambda prefix: MambaDecoderLayer( + config, cache_config=cache_config, quant_config=quant_config), + prefix=f"{prefix}.layers") + self.norm_f = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon) + self.make_empty_intermediate_tensors = ( + make_empty_intermediate_tensors_factory( + ["hidden_states", "residual"], config.hidden_size)) def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: return self.embeddings(input_ids) @@ -115,29 +120,40 @@ def forward( positions: torch.Tensor, attn_metadata: AttentionMetadata, mamba_cache_params: MambaCacheParams, + intermediate_tensors: Optional[IntermediateTensors] = None, inputs_embeds: Optional[torch.Tensor] = None, ) -> torch.Tensor: - - if inputs_embeds is not None: - hidden_states = inputs_embeds + if get_pp_group().is_first_rank: + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) + residual = None else: - hidden_states = self.get_input_embeddings(input_ids) - residual = None + assert intermediate_tensors is not None + hidden_states = intermediate_tensors["hidden_states"] + residual = intermediate_tensors["residual"] - for i in range(len(self.layers)): + for i in range(self.start_layer, self.end_layer): layer = self.layers[i] hidden_states, residual = layer( positions=positions, hidden_states=hidden_states, attn_metadata=attn_metadata, residual=residual, - mamba_cache_params=mamba_cache_params.at_layer_idx(i)) + mamba_cache_params=mamba_cache_params.at_layer_idx( + i - self.start_layer)) + if not get_pp_group().is_last_rank: + return IntermediateTensors({ + "hidden_states": hidden_states, + "residual": residual + }) hidden_states, _ = self.norm_f(hidden_states, residual) return hidden_states -class MambaForCausalLM(nn.Module, HasInnerState, IsAttentionFree): +class MambaForCausalLM(nn.Module, HasInnerState, IsAttentionFree, SupportsPP): def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config = vllm_config.model_config.hf_config @@ -149,7 +165,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() self.config = config + self.vllm_config = vllm_config self.scheduler_config = scheduler_config + 
self.model_config = vllm_config.model_config self.backbone = MambaModel(vllm_config=vllm_config, prefix=maybe_prefix(prefix, "backbone")) self.unpadded_vocab_size = config.vocab_size @@ -175,6 +193,20 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config.vocab_size) self.sampler = get_sampler() + self.make_empty_intermediate_tensors = ( + self.backbone.make_empty_intermediate_tensors) + if self.scheduler_config is not None and \ + not self.model_config.enforce_eager: + if self.scheduler_config.max_num_seqs > \ + vllm_config.compilation_config.max_capture_size: + self.max_batch_size = \ + vllm_config.compilation_config.max_capture_size + else: + self.max_batch_size = vllm_config.pad_for_cudagraph( + self.scheduler_config.max_num_seqs) + else: + self.max_batch_size = 8192 + 2 + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: return self.backbone.get_input_embeddings(input_ids) @@ -187,12 +219,11 @@ def forward(self, inputs_embeds: Optional[torch.Tensor] = None, **kwargs): if self.mamba_cache is None: - max_batch_size = (_get_graph_batch_size( - self.scheduler_config.max_num_seqs) if self.scheduler_config - else max(_BATCH_SIZES_TO_CAPTURE) + 2) + num_mamba_layers = self.model_config.get_num_layers_by_block_type( + self.vllm_config.parallel_config, LayerBlockType.mamba) self.mamba_cache = MambaCacheManager( - self.lm_head.weight.dtype, self.config.num_hidden_layers, - max_batch_size, *self._get_mamba_cache_shape()) + self.lm_head.weight.dtype, num_mamba_layers, + self.max_batch_size, *self._get_mamba_cache_shape()) ( mamba_cache_tensors, @@ -205,7 +236,8 @@ def forward(self, state_indices_tensor) hidden_states = self.backbone(input_ids, positions, attn_metadata, - mamba_cache_params, inputs_embeds) + mamba_cache_params, intermediate_tensors, + inputs_embeds) return hidden_states @@ -243,16 +275,22 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() for name, loaded_weight in weights: if "A_log" in name: name = name.replace("A_log", "A") # Skip loading extra bias for GPTQ models. 
if name.endswith(".bias") and name not in params_dict: continue + if is_pp_missing_parameter(name, self): + continue param = params_dict[name] weight_loader = getattr(param, "weight_loader", default_weight_loader) weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/minicpm.py b/vllm/model_executor/models/minicpm.py index c9a573278a136..5a0f202364f26 100644 --- a/vllm/model_executor/models/minicpm.py +++ b/vllm/model_executor/models/minicpm.py @@ -52,7 +52,7 @@ from vllm.sequence import IntermediateTensors from .interfaces import SupportsLoRA, SupportsPP -from .utils import (is_pp_missing_parameter, +from .utils import (AutoWeightsLoader, is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) @@ -378,6 +378,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config.hidden_size, org_num_embeddings=config.vocab_size, ) + self.num_experts = getattr(self.config, "num_experts", 0) self._init_layers(prefix, config, cache_config, quant_config) self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.make_empty_intermediate_tensors = ( @@ -437,6 +438,73 @@ def forward( hidden_states = self.norm(hidden_states) return hidden_states + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: + stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + ("qkv_proj", "q_proj", "q"), + ("qkv_proj", "k_proj", "k"), + ("qkv_proj", "v_proj", "v"), + ("gate_up_proj", "gate_proj", 0), + ("gate_up_proj", "up_proj", 1), + ] + expert_params_mapping = [ + # (param_name, weight_name, expert_id) + ("ws" if weight_name in ["w1", "w3"] else "w2s", + f"experts.{expert_id}.{weight_name}.weight", expert_id) + for expert_id in range(self.num_experts) + for weight_name in ["w1", "w2", "w3"] + ] + params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() + for name, loaded_weight in weights: + if "rotary_emb.inv_freq" in name: + continue + if ("rotary_emb.cos_cached" in name + or "rotary_emb.sin_cached" in name): + # Models trained using ColossalAI may include these tensors in + # the checkpoint. Skip them. + continue + for (param_name, weight_name, shard_id) in stacked_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + if is_pp_missing_parameter(name, self): + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + for param_name, weight_name, expert_id in expert_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + if is_pp_missing_parameter(name, self): + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, + loaded_weight, + weight_name, + expert_id=expert_id) + break + else: + # Skip loading extra bias for GPTQ models. 
+ if name.endswith(".bias") and name not in params_dict: + continue + if is_pp_missing_parameter(name, self): + continue + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params + class MiniCPMForCausalLM(nn.Module, SupportsLoRA, SupportsPP): packed_modules_mapping = { @@ -466,6 +534,16 @@ class MiniCPMForCausalLM(nn.Module, SupportsLoRA, SupportsPP): } embedding_padding_modules = ["lm_head"] + # BitsAndBytes specific attributes + bitsandbytes_stacked_params_mapping = { + # shard_name, weight_name, index + "q_proj": ("qkv_proj", 0), + "k_proj": ("qkv_proj", 1), + "v_proj": ("qkv_proj", 2), + "gate_proj": ("gate_up_proj", 0), + "up_proj": ("gate_up_proj", 1), + } + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() config = vllm_config.model_config.hf_config @@ -480,8 +558,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.cache_config = cache_config self.quant_config = quant_config - self.num_experts = getattr(self.config, "num_experts", 0) - self._init_model(vllm_config=vllm_config, prefix=prefix) + self.model = self._init_model(vllm_config=vllm_config, + prefix=maybe_prefix(prefix, "model")) + unpadded_vocab_size = config.vocab_size if lora_config: unpadded_vocab_size += lora_config.lora_extra_vocab_size @@ -506,8 +585,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.model.make_empty_intermediate_tensors) def _init_model(self, *, vllm_config: VllmConfig, prefix: str = ""): - self.model = MiniCPMModel(vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "model")) + return MiniCPMModel(vllm_config=vllm_config, prefix=prefix) def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: return self.model.get_input_embeddings(input_ids) @@ -546,72 +624,9 @@ def sample( def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]) -> Set[str]: - stacked_params_mapping = [ - # (param_name, shard_name, shard_id) - ("qkv_proj", "q_proj", "q"), - ("qkv_proj", "k_proj", "k"), - ("qkv_proj", "v_proj", "v"), - ("gate_up_proj", "gate_proj", 0), - ("gate_up_proj", "up_proj", 1), - ] - expert_params_mapping = [ - # (param_name, weight_name, expert_id) - ("ws" if weight_name in ["w1", "w3"] else "w2s", - f"experts.{expert_id}.{weight_name}.weight", expert_id) - for expert_id in range(self.num_experts) - for weight_name in ["w1", "w2", "w3"] - ] - params_dict = dict(self.named_parameters()) - loaded_params: Set[str] = set() - for name, loaded_weight in weights: - if "rotary_emb.inv_freq" in name: - continue - if ("rotary_emb.cos_cached" in name - or "rotary_emb.sin_cached" in name): - # Models trained using ColossalAI may include these tensors in - # the checkpoint. Skip them. - continue - # With tie_word_embeddings, we can skip lm_head.weight - # The weight might appear unnecessarily in the files if the model is - # processed with quantization, LoRA, fine-tuning, etc. - if self.config.tie_word_embeddings and "lm_head.weight" in name: - continue - for (param_name, weight_name, shard_id) in stacked_params_mapping: - if weight_name not in name: - continue - name = name.replace(weight_name, param_name) - # Skip loading extra bias for GPTQ models. 
- if name.endswith(".bias") and name not in params_dict: - continue - if is_pp_missing_parameter(name, self): - continue - param = params_dict[name] - weight_loader = param.weight_loader - weight_loader(param, loaded_weight, shard_id) - break - else: - for param_name, weight_name, expert_id in expert_params_mapping: - if weight_name not in name: - continue - name = name.replace(weight_name, param_name) - if is_pp_missing_parameter(name, self): - continue - param = params_dict[name] - weight_loader = param.weight_loader - weight_loader(param, - loaded_weight, - weight_name, - expert_id=expert_id) - break - else: - # Skip loading extra bias for GPTQ models. - if name.endswith(".bias") and name not in params_dict: - continue - if is_pp_missing_parameter(name, self): - continue - param = params_dict[name] - weight_loader = getattr(param, "weight_loader", - default_weight_loader) - weight_loader(param, loaded_weight) - loaded_params.add(name) - return loaded_params + loader = AutoWeightsLoader( + self, + skip_prefixes=(["lm_head."] + if self.config.tie_word_embeddings else None), + ) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/minicpm3.py b/vllm/model_executor/models/minicpm3.py index c38c31a0d4953..e9d7eada1d16c 100644 --- a/vllm/model_executor/models/minicpm3.py +++ b/vllm/model_executor/models/minicpm3.py @@ -40,7 +40,7 @@ MiniCPMForCausalLM, MiniCPMModel) -from .utils import make_layers, maybe_prefix +from .utils import make_layers class MiniCPM3Attention(nn.Module): @@ -241,6 +241,11 @@ class MiniCPM3ForCausalLM(MiniCPMForCausalLM): # `embedding_modules` and `embedding_padding_modules` # are inherited from MiniCPMForCausalLM + bitsandbytes_stacked_params_mapping = { + # shard_name, weight_name, index + "gate_proj": ("gate_up_proj", 0), + "up_proj": ("gate_up_proj", 1), + } + def _init_model(self, *, vllm_config: VllmConfig, prefix: str = ""): - self.model = MiniCPM3Model(vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "model")) + return MiniCPM3Model(vllm_config=vllm_config, prefix=prefix) diff --git a/vllm/model_executor/models/minicpmv.py b/vllm/model_executor/models/minicpmv.py index aacce477e0460..1e8f9bd4cf418 100644 --- a/vllm/model_executor/models/minicpmv.py +++ b/vllm/model_executor/models/minicpmv.py @@ -22,7 +22,7 @@ """Inference-only MiniCPM-V model compatible with HuggingFace weights.""" import math import re -from functools import partial +from functools import cached_property, partial from typing import (Any, Callable, Iterable, List, Literal, Mapping, Optional, Set, Tuple, TypedDict, Union) @@ -37,19 +37,15 @@ from vllm.config import VllmConfig from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, DummyData, InputContext, token_inputs) -from vllm.model_executor.layers.logits_processor import LogitsProcessor from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.resampler import (BaseResampler, Resampler2, get_2d_sincos_pos_embed) from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler -from vllm.model_executor.layers.vocab_parallel_embedding import ParallelLMHead from vllm.model_executor.model_loader.utils import set_default_torch_dtype -from vllm.model_executor.model_loader.weight_utils import default_weight_loader -from vllm.model_executor.models.llama import LlamaModel -from vllm.model_executor.models.minicpm import MiniCPMModel +from vllm.model_executor.models.llama import LlamaForCausalLM +from vllm.model_executor.models.minicpm import MiniCPMForCausalLM 
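+# NOTE: MiniCPM-V now wraps complete *ForCausalLM models rather than bare
+# *Model backbones, so lm_head, logits computation and sampling are delegated
+# to the wrapped language model instead of being re-implemented in this file.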
from vllm.model_executor.models.module_mapping import MultiModelKeys -from vllm.model_executor.models.qwen2 import Qwen2Model -from vllm.model_executor.models.utils import LLMWrapper +from vllm.model_executor.models.qwen2 import Qwen2ForCausalLM from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs from vllm.multimodal.image import cached_get_image_processor @@ -58,11 +54,7 @@ from .idefics2_vision_model import Idefics2VisionTransformer from .interfaces import SupportsLoRA, SupportsMultiModal, SupportsPP -from .utils import is_pp_missing_parameter, maybe_prefix - -_KEYS_TO_MODIFY_MAPPING = { - "llm.lm_head": "lm_head", -} +from .utils import AutoWeightsLoader, maybe_prefix RawImageType = Union[Image.Image, torch.Tensor] @@ -297,10 +289,9 @@ def input_processor_for_minicpmv(ctx: InputContext, inputs: DecoderOnlyInputs): def get_placeholder(image_size: Tuple[int, int], num_image: int): if version == (2, 0) or version == (2, 5): - return image_processor. \ - get_slice_image_placeholder(image_size) - return image_processor. \ - get_slice_image_placeholder(image_size, num_image) + return image_processor.get_slice_image_placeholder(image_size) + return image_processor.get_slice_image_placeholder( + image_size, num_image) prompt = inputs.get("prompt") token_ids = inputs.get("prompt_token_ids") @@ -400,37 +391,32 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.vpm = self.init_vision_module(config, quant_config, prefix=maybe_prefix(prefix, "vpm")) - param_dtype = torch.get_default_dtype() - self.vpm.to(dtype=param_dtype) self.vision_dim = (self.vpm.embed_dim if self.version == (2, 0) else self.vpm.embeddings.embed_dim) self.embed_dim = self.config.hidden_size + self.resampler = self.init_resampler(self.embed_dim, self.vision_dim, quant_config=quant_config, prefix=maybe_prefix( prefix, "resampler")) - self.resampler.to(device="cuda", dtype=param_dtype) - # TODO: why is there _KEYS_TO_MODIFY_MAPPING? 
lm_head should be in llm - self.lm_head = ParallelLMHead(config.vocab_size, - config.hidden_size, - quant_config=quant_config, - prefix=maybe_prefix( - prefix, "llm.lm_head")) - self.logits_processor = LogitsProcessor(config.vocab_size) - self.sampler = get_sampler() self.make_empty_intermediate_tensors = ( self.llm.make_empty_intermediate_tensors) + @cached_property + def sampler(self): + if hasattr(self.llm, "sampler"): + return self.llm.sampler + + return get_sampler() + def get_embedding( self, input_ids: torch.Tensor, image_inputs: Optional[MiniCPMVImageInputs], ) -> Tuple[torch.Tensor, torch.Tensor]: - vlm_embedding: torch.Tensor = self.llm.embed_tokens(input_ids) - if hasattr(self.config, "scale_emb"): - vlm_embedding *= self.config.scale_emb + vlm_embedding: torch.Tensor = self.llm.get_input_embeddings(input_ids) if image_inputs is None: # No image vision_hidden_states = torch.tensor([], device=input_ids.device) @@ -575,7 +561,7 @@ def forward( # for `torch.compile` integration input_ids = None - output = self.llm( + output = self.llm.model( input_ids=input_ids, positions=positions, kv_caches=kv_caches, @@ -590,9 +576,7 @@ def compute_logits( hidden_states: torch.Tensor, sampling_metadata: SamplingMetadata, ) -> Optional[torch.Tensor]: - logits = self.logits_processor(self.lm_head, hidden_states, - sampling_metadata) - return logits + return self.llm.compute_logits(hidden_states, sampling_metadata) def sample( self, @@ -604,52 +588,8 @@ def sample( def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]) -> Set[str]: - stacked_params_mapping = [ - # (param_name, shard_name, shard_id) - ("qkv_proj", "q_proj", "q"), - ("qkv_proj", "k_proj", "k"), - ("qkv_proj", "v_proj", "v"), - ("gate_up_proj", "gate_proj", 0), - ("gate_up_proj", "up_proj", 1), - ] - params_dict = dict(self.named_parameters()) - loaded_params: Set[str] = set() - for name, loaded_weight in weights: - for key_to_modify, new_key in _KEYS_TO_MODIFY_MAPPING.items(): - if key_to_modify in name: - name = name.replace(key_to_modify, new_key) - if "rotary_emb.inv_freq" in name: - continue - if ("rotary_emb.cos_cached" in name - or "rotary_emb.sin_cached" in name): - # Models trained using ColossalAI may include these tensors in - # the checkpoint. Skip them. 
- continue - use_default_weight_loading = False - if self.is_default_weight_loading(name): - use_default_weight_loading = True - else: - for param_name, weight_name, shard_id in stacked_params_mapping: - if weight_name not in name: - continue - name = name.replace(weight_name, param_name) - if is_pp_missing_parameter(name, self): - continue - param = params_dict[name] - weight_loader = param.weight_loader - weight_loader(param, loaded_weight, shard_id) - break - else: - use_default_weight_loading = True - if use_default_weight_loading: - if is_pp_missing_parameter(name, self): - continue - param = params_dict[name] - weight_loader = getattr(param, "weight_loader", - default_weight_loader) - weight_loader(param, loaded_weight) - loaded_params.add(name) - return loaded_params + loader = AutoWeightsLoader(self) + return loader.load_weights(weights) def get_mm_mapping(self) -> MultiModelKeys: """ @@ -693,9 +633,6 @@ def get_vision_hidden_states(self, data: MiniCPMVImageInputs) -> torch.Tensor: raise NotImplementedError - def is_default_weight_loading(self, name: str) -> bool: - raise NotImplementedError - class MiniCPMV2_0(MiniCPMVBaseModel): @@ -708,8 +645,7 @@ def init_llm( vllm_config: VllmConfig, prefix: str = "", ) -> nn.Module: - return LLMWrapper(MiniCPMModel(vllm_config=vllm_config, prefix=prefix), - name="model") + return MiniCPMForCausalLM(vllm_config=vllm_config, prefix=prefix) def init_vision_module( self, @@ -717,11 +653,12 @@ def init_vision_module( quant_config: Optional[QuantizationConfig], prefix: str = "", ) -> nn.Module: - # TODO :refactor this vision model + # TODO: refactor this vision model try: import timm except ImportError: raise ImportError("Please install timm==0.9.10") from ImportError + with set_default_torch_dtype(torch.float16): model = timm.create_model( "vit_so400m_patch14_siglip_384.webli", @@ -731,6 +668,8 @@ def init_vision_module( dynamic_img_pad=True, ) + model = model.to(dtype=torch.get_default_dtype()) + if (isinstance(model, timm.models.VisionTransformer) and model.attn_pool is not None): model.attn_pool = torch.nn.Identity() @@ -759,7 +698,7 @@ def init_resampler(self, quant_config=quant_config, prefix=prefix) - return resampler + return resampler.to(device="cuda", dtype=torch.get_default_dtype()) def get_vision_embedding( self, @@ -790,9 +729,6 @@ def get_vision_hidden_states(self, return self.get_vision_embedding(pixel_values) - def is_default_weight_loading(self, name: str) -> bool: - return "resampler" in name or "vpm" in name - class MiniCPMV2_5(MiniCPMVBaseModel, SupportsLoRA): packed_modules_mapping = { @@ -843,8 +779,7 @@ def init_llm( vllm_config: VllmConfig, prefix: str = "", ) -> nn.Module: - return LLMWrapper(LlamaModel(vllm_config=vllm_config, prefix=prefix), - name="model") + return LlamaForCausalLM(vllm_config=vllm_config, prefix=prefix) def init_vision_module( self, @@ -871,7 +806,8 @@ def init_resampler(self, kv_dim=vision_dim, quant_config=quant_config, prefix=prefix) - return resampler + + return resampler.to(device="cuda", dtype=torch.get_default_dtype()) def get_vision_embedding( self, @@ -913,9 +849,6 @@ def get_vision_hidden_states(self, return self.get_vision_embedding(all_pixel_values.type(dtype), patch_attn_mask, tgt_sizes) - def is_default_weight_loading(self, name: str) -> bool: - return "resampler" in name - class MiniCPMV2_6(MiniCPMVBaseModel, SupportsLoRA): packed_modules_mapping = { @@ -966,8 +899,7 @@ def init_llm( vllm_config: VllmConfig, prefix: str = "", ) -> nn.Module: - return 
LLMWrapper(Qwen2Model(vllm_config=vllm_config, prefix=prefix), - name="model") + return Qwen2ForCausalLM(vllm_config=vllm_config, prefix=prefix) def init_vision_module( self, @@ -995,7 +927,8 @@ def init_resampler(self, kv_dim=vision_dim, quant_config=quant_config, prefix=prefix) - return resampler + + return resampler.to(device="cuda", dtype=torch.get_default_dtype()) def get_vision_embedding( self, @@ -1043,9 +976,6 @@ def get_vision_hidden_states(self, return self.resampler(vision_embedding, tgt_sizes) - def is_default_weight_loading(self, name: str) -> bool: - return "resampler" in name - _SUPPORT_VERSION = { (2, 0): MiniCPMV2_0, diff --git a/vllm/model_executor/models/molmo.py b/vllm/model_executor/models/molmo.py index acedddd84d7cb..a328b5a2aeea7 100644 --- a/vllm/model_executor/models/molmo.py +++ b/vllm/model_executor/models/molmo.py @@ -3,7 +3,7 @@ from array import array from dataclasses import dataclass from functools import lru_cache, partial -from typing import Iterable, List, Mapping, Optional, Tuple, TypedDict +from typing import Iterable, List, Mapping, Optional, Set, Tuple, TypedDict import torch from einops import rearrange @@ -13,6 +13,7 @@ from transformers import PretrainedConfig from vllm.attention import Attention, AttentionMetadata +from vllm.attention.layer import MultiHeadAttention from vllm.compilation.decorators import support_torch_compile from vllm.config import CacheConfig, VllmConfig from vllm.distributed import (get_pp_group, get_tensor_model_parallel_rank, @@ -36,22 +37,25 @@ ParallelLMHead, VocabParallelEmbedding) from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs -from vllm.multimodal.inputs import NestedTensors +from vllm.multimodal.inputs import NestedTensors, PlaceholderRange from vllm.multimodal.utils import cached_get_tokenizer -from vllm.platforms import _Backend from vllm.sequence import (VLLM_TOKEN_ID_ARRAY_TYPE, IntermediateTensors, SequenceData) from vllm.transformers_utils.processor import get_processor from .interfaces import SupportsMultiModal, SupportsPP -from .utils import (get_vit_attn_backend, +from .utils import (AutoWeightsLoader, WeightsMapper, is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, - maybe_prefix) + maybe_prefix, merge_multimodal_embeddings) # TODO: hard-coded for now. Consider making it configurable. VIT_LAYERS = [-2, -9] NUM_PREFIX_TOKENS = 1 ADDITIONAL_VOCAB_SIZE = 128 +DEFAULT_IMAGE_PATCH_TOKEN_ID = 152066 +DEFAULT_IM_START_TOKEN_ID = 152067 +DEFAULT_IM_END_TOKEN_ID = 152064 +DEFAULT_IM_COL_TOKEN_ID = 152065 class MolmoImageInputs(TypedDict): @@ -75,6 +79,11 @@ class MolmoImageInputs(TypedDict): `(batch_size, num_crops, num_patch)` """ + image_start_end: Tuple[int, int] + """Starting and ending index of placeholder + tokens + """ + @dataclass class VisionBackboneConfig: @@ -187,13 +196,11 @@ def __init__( quant_config=quant_config, ) - # Detect attention implementation. 
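+        # Backend selection is delegated to vLLM's MultiHeadAttention layer,
+        # which internally picks an available implementation (e.g. flash-attn,
+        # SDPA or xFormers) instead of this module detecting one by hand.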
- self.attn_backend: _Backend = get_vit_attn_backend(support_fa=True) - if self.attn_backend not in { - _Backend.FLASH_ATTN, _Backend.TORCH_SDPA, _Backend.XFORMERS - }: - raise RuntimeError( - f"Molmo does not support {self.attn_backend} backend now.") + self.scale = self.head_dim**-0.5 + self.attn = MultiHeadAttention(self.num_heads, + self.head_dim, + self.scale, + num_kv_heads=self.num_kv_heads) def forward(self, inputs_q: torch.Tensor, @@ -209,25 +216,8 @@ def forward(self, xq, _ = self.wq(inputs_q) xk, _ = self.wk(inputs_k) xv, _ = self.wv(inputs_v) - q_shape = xq.size()[:-1] + (self.num_heads, self.head_dim) - kv_shape = xk.size()[:-1] + (self.num_kv_heads, self.head_dim) - xq = xq.view(*q_shape) - xk = xk.view(*kv_shape) - xv = xv.view(*kv_shape) - - if self.attn_backend == _Backend.FLASH_ATTN: - from flash_attn import flash_attn_func - output = flash_attn_func(xq, xk, xv, dropout_p=0.0, causal=False) - elif self.attn_backend == _Backend.TORCH_SDPA: - xq, xk, xv = (rearrange(x, "b s h d -> b h s d") - for x in (xq, xk, xv)) - output = F.scaled_dot_product_attention(xq, xk, xv) - output = rearrange(output, "b h s d -> b s h d ") - elif self.attn_backend == _Backend.XFORMERS: - from xformers import ops as xops - output = xops.memory_efficient_attention_forward(xq, xk, xv, p=0) - - output = rearrange(output, "b s h d -> b s (h d)").contiguous() + + output = self.attn(xq, xk, xv) output, _ = self.wo(output) return output @@ -720,6 +710,42 @@ def forward( # image_features: (batch_size, num_image, num_patch, d_model) return image_features + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: + stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + ("gate_up_proj", "gate_proj", 0), + ("gate_up_proj", "up_proj", 1), + ] + params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() + + for name, loaded_weight in weights: + for (param_name, weight_name, shard_id) in stacked_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + # Skip loading extra bias for GPTQ models. 
+ if name.endswith(".bias") and name not in params_dict: + continue + if is_pp_missing_parameter(name, self): + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + if name.endswith(".bias") and name not in params_dict: + continue + if is_pp_missing_parameter(name, self): + continue + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params + @support_torch_compile class MolmoModel(nn.Module): @@ -804,6 +830,28 @@ def forward( hidden_states = self.norm(hidden_states) return hidden_states + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: + params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() + + for name, loaded_weight in weights: + if "gate_up_proj" in name: + up_proj, gate_proj = loaded_weight.chunk(2, dim=0) + loaded_weight = torch.cat([gate_proj, up_proj], dim=0) + + if name.endswith(".bias") and name not in params_dict: + continue + if is_pp_missing_parameter(name, self): + continue + + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params + cached_get_processor = lru_cache(get_processor) @@ -879,6 +927,8 @@ def image_input_mapper_for_molmo( ctx: InputContext, data: object, ): + if isinstance(data, list): + data = data[0] return MultiModalKwargs(data) @@ -928,7 +978,22 @@ def dummy_data_for_molmo(ctx: InputContext, seq_len: int, if "image_masks" in out: dummy_imgdata["image_masks"] = out["image_masks"] dummy_imgdata["seq_len"] = torch.tensor(seq_len, dtype=torch.long) - return DummyData(dummy_seqdata, {"image": dummy_imgdata}) + size = 0 + offset = -1 + for i in range(len(token_ids)): + if token_ids[i] in (DEFAULT_IMAGE_PATCH_TOKEN_ID, + DEFAULT_IM_START_TOKEN_ID, DEFAULT_IM_END_TOKEN_ID, + DEFAULT_IM_COL_TOKEN_ID): + if offset < 0: + offset = i + size += 1 + dummy_imgdata["image_start_end"] = (offset, offset + size) + return DummyData(seq_data=dummy_seqdata, + multi_modal_data={"image": dummy_imgdata}, + multi_modal_placeholders={ + "image": + [PlaceholderRange(offset=offset, length=size)] + }) def pad_images( @@ -1016,19 +1081,34 @@ def input_processor_for_molmo(ctx: InputContext, inputs: DecoderOnlyInputs): if image_masks is not None: image_data["image_masks"] = image_masks - image_data["seq_len"] = torch.tensor(len(out["input_ids"]), + new_prompt_token_ids = out["input_ids"].tolist() + image_data["seq_len"] = torch.tensor(len(new_prompt_token_ids), dtype=torch.long) multi_modal_data = dict(image=image_data) + size = 0 + offset = -1 + for i in range(len(new_prompt_token_ids)): + if new_prompt_token_ids[i] in (DEFAULT_IMAGE_PATCH_TOKEN_ID, + DEFAULT_IM_START_TOKEN_ID, + DEFAULT_IM_END_TOKEN_ID, + DEFAULT_IM_COL_TOKEN_ID): + if offset < 0: + offset = i + size += 1 + image_data["image_start_end"] = (offset, offset + size) prompt = inputs.get("prompt") if prompt is None: - prompt = tokenizer.decode(out["input_ids"]) + prompt = tokenizer.decode(new_prompt_token_ids) return token_inputs( - prompt_token_ids=out["input_ids"], + prompt_token_ids=new_prompt_token_ids, prompt=prompt, multi_modal_data=multi_modal_data, + multi_modal_placeholders={ + "image": [PlaceholderRange(offset=offset, length=size)] + }, ) @@ -1074,6 +1154,7 @@ def _parse_and_validate_image_input( ) -> 
Optional[MolmoImageInputs]: images = kwargs.pop("images", None) image_masks = kwargs.pop("image_masks", None) + image_start_end = kwargs.pop("image_start_end", None) if images is None: return None @@ -1091,6 +1172,7 @@ def _parse_and_validate_image_input( image_input_idx=image_input_idx, seq_len=seq_len, image_masks=image_masks, + image_start_end=image_start_end, ) def _process_image_input( @@ -1139,9 +1221,16 @@ def get_multimodal_embeddings(self, **kwargs) -> Optional[NestedTensors]: # Note: In this original implementation from AI2, the final # vision_embeddings will be always be the same length - # of input embedddings, which is not very efficient. - # TODO(ywang96): see if this can be optimized. + # of input embeddings. vision_embeddings = torch.einsum('nd,nm->md', image_features, mat) + + # Split by the sizes of the input sequences. For each full embedding, + # extract the actual vision embeddings to be merged. + vision_embeddings = list(vision_embeddings.split(seq_len.tolist())) + for i in range(len(vision_embeddings)): + start, end = image_input['image_start_end'][i] + vision_embeddings[i] = vision_embeddings[i][start:end] + return vision_embeddings def get_input_embeddings( @@ -1151,7 +1240,11 @@ def get_input_embeddings( ) -> torch.Tensor: inputs_embeds = self.model.get_input_embeddings(input_ids) if multimodal_embeddings is not None: - inputs_embeds = inputs_embeds + multimodal_embeddings + inputs_embeds = merge_multimodal_embeddings( + input_ids, inputs_embeds, multimodal_embeddings, [ + DEFAULT_IMAGE_PATCH_TOKEN_ID, DEFAULT_IM_START_TOKEN_ID, + DEFAULT_IM_END_TOKEN_ID, DEFAULT_IM_COL_TOKEN_ID + ]) return inputs_embeds def forward( @@ -1200,103 +1293,53 @@ def sample( return next_tokens def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): - - params_mapping = [ - ("model.transformer.ln_f.weight", "model.norm.weight"), - ("attn_out", "self_attn.o_proj"), - ("att_proj", "self_attn.qkv_proj"), - ("q_norm", "self_attn.q_norm"), - ("k_norm", "self_attn.k_norm"), - ("attn_norm", "input_layernorm"), - ("ff_norm", "post_attention_layernorm"), - ] - - params_dict = dict(self.named_parameters(remove_duplicate=False)) - - embedding_weight = dict() - projector_weight = dict() - for name, loaded_weight in weights: - if "rotary_emb.inv_freq" in name: - continue - if self.config.tie_word_embeddings and "lm_head.weight" in name: - continue - - if "wte.embedding" in name: - embedding_weight["embedding"] = loaded_weight - continue - - if "wte.new_embedding" in name: - embedding_weight["new_embedding"] = loaded_weight - continue - - if "vision_backbone" in name: - if name.startswith("model"): - name = name[len("model."):] - if 'image_projector' in name: - if 'w1' in name: - projector_weight['gate_proj'] = loaded_weight - elif 'w3' in name: - projector_weight['up_proj'] = loaded_weight - elif 'w2' in name: - projector_weight['down_proj'] = loaded_weight - else: - raise ValueError( - f"Unexpected projector weight: {name}") - continue - else: - if "transformer.blocks" in name: - name = name.replace("transformer.blocks", "layers") - - if "ff_proj" in name: - name = name.replace("ff_proj", "mlp.gate_up_proj") - assert 'weight' in name - up_weight, gate_weight = loaded_weight.chunk(2, dim=0) - loaded_weight = torch.cat([gate_weight, up_weight], dim=0) - - elif "ff_out" in name: - if "layers" in name: - name = name.replace("ff_out", "mlp.down_proj") - else: - # lm head - name = name.replace("model.transformer.ff_out", - "lm_head") - - else: - for (param_name, weight_name) in 
params_mapping: - if param_name in name: - name = name.replace(param_name, weight_name) - break - - try: - # Skip loading extra bias for GPTQ models. - if name.endswith(".bias") and name not in params_dict: - continue - param = params_dict[name] - except KeyError: - raise ValueError(f"Unexpected weight: {name}") from None - - weight_loader = getattr(param, "weight_loader", - default_weight_loader) - weight_loader(param, loaded_weight) - - gate_up_proj_weight = torch.cat( - [projector_weight["gate_proj"], projector_weight["up_proj"]], - dim=0) - name = "vision_backbone.image_projector.gate_up_proj.weight" - param = params_dict[name] - weight_loader = getattr(param, "weight_loader", default_weight_loader) - weight_loader(param, gate_up_proj_weight) - - down_proj_weight = projector_weight["down_proj"] - name = "vision_backbone.image_projector.down_proj.weight" - param = params_dict[name] - weight_loader = getattr(param, "weight_loader", default_weight_loader) - weight_loader(param, down_proj_weight) - - embedding_weight = torch.cat( - [embedding_weight["embedding"], embedding_weight["new_embedding"]], - dim=0) - name = "model.embed_tokens.weight" - param = params_dict[name] - weight_loader = getattr(param, "weight_loader", default_weight_loader) - weight_loader(param, embedding_weight) + hf_to_vllm_mapper = WeightsMapper( + orig_to_new_substr={ + # vision backbone mapping + "image_projector.w1.": "image_projector.gate_proj.", + "image_projector.w3.": "image_projector.up_proj.", + "image_projector.w2.": "image_projector.down_proj.", + # language backbone mapping + "att_proj": "self_attn.qkv_proj", + "attn_out": "self_attn.o_proj", + "q_norm": "self_attn.q_norm", + "k_norm": "self_attn.k_norm", + "ff_proj": "mlp.gate_up_proj", + "ff_out": "mlp.down_proj", + "attn_norm": "input_layernorm", + "ff_norm": "post_attention_layernorm", + }, + orig_to_new_prefix={ + # vision backbone mapping + "model.vision_backbone.": "vision_backbone.", + # language backbone mapping + "model.transformer.blocks.": "model.layers.", + "model.transformer.ln_f.": "model.norm.", + # lm_head is renamed to model.transformer.mlp.down_proj firstly, + # we need to run a second renaming for it + "model.transformer.mlp.down_proj.": "lm_head.", + }, + ) + loader = AutoWeightsLoader(self) + weights = _get_weights_with_merged_embedding(weights) + return loader.load_weights(weights, mapper=hf_to_vllm_mapper) + + +def _get_weights_with_merged_embedding( + weights: Iterable[Tuple[str, torch.Tensor]] +) -> Iterable[Tuple[str, torch.Tensor]]: + embedding_weights = {} + for name, weight in weights: + if "wte.embedding" in name: + embedding_weights["embedding"] = weight + elif "wte.new_embedding" in name: + embedding_weights["new_embedding"] = weight + else: + yield (name, weight) + # this is compatible with most of quantization, + # because they won't quantize embed_tokens + embedding_weights = torch.cat( + [embedding_weights["embedding"], embedding_weights["new_embedding"]], + dim=0, + ) + yield ("model.embed_tokens.weight", embedding_weights) diff --git a/vllm/model_executor/models/nemotron.py b/vllm/model_executor/models/nemotron.py index c7b4c22b6896b..34cb9981c167b 100644 --- a/vllm/model_executor/models/nemotron.py +++ b/vllm/model_executor/models/nemotron.py @@ -435,9 +435,11 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, config.vocab_size, logit_scale) - self.sampler = get_sampler() else: self.lm_head = PPMissingLayer() + + self.sampler = 
get_sampler() + self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) diff --git a/vllm/model_executor/models/paligemma.py b/vllm/model_executor/models/paligemma.py index 2e5b6bee784e7..f9ad0c67adaba 100644 --- a/vllm/model_executor/models/paligemma.py +++ b/vllm/model_executor/models/paligemma.py @@ -105,6 +105,11 @@ def input_processor_for_paligemma(ctx: InputContext, orig_prompt_ids.remove(hf_config.image_token_index) new_prompt = f"{image_token_str_pad}{bos_token}{orig_prompt}\n" + + # The PaliGemma 2 tokenizer does not include a starting BOS token + if orig_prompt_ids[0] != hf_config.bos_token_id: + orig_prompt_ids = [hf_config.bos_token_id] + orig_prompt_ids + new_token_ids = image_token_ids_pad + orig_prompt_ids + [108] #newline # NOTE: Create a defensive copy of the original inputs @@ -149,11 +154,16 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): projection_dim=config.vision_config.projection_dim) self.quant_config = quant_config - config.text_config.architectures = ["GemmaForCausalLM"] + + if config.text_config.model_type == "gemma": + config.text_config.architectures = ["GemmaForCausalLM"] + else: + config.text_config.architectures = ["Gemma2ForCausalLM"] self.language_model = init_vllm_registered_model( - config.text_config, vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "language_model")) + hf_config=config.text_config, + prefix=maybe_prefix(prefix, "language_model"), + ) logit_scale = getattr(config, "logit_scale", 1.0) self.language_model.logits_processor.scale *= logit_scale diff --git a/vllm/model_executor/models/phi3v.py b/vllm/model_executor/models/phi3v.py index 4cb874a13e0c1..7ab06768ae612 100644 --- a/vllm/model_executor/models/phi3v.py +++ b/vllm/model_executor/models/phi3v.py @@ -12,41 +12,38 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import itertools -import re -from functools import cached_property, lru_cache -from typing import (Any, Dict, Iterable, List, Literal, Mapping, Optional, Set, - Tuple, TypedDict, Union) +from functools import cached_property +from typing import (Iterable, List, Literal, Mapping, Optional, Set, Tuple, + TypedDict, Union) -import numpy as np import torch import torch.nn as nn -from PIL import Image -from transformers import CLIPVisionConfig, PretrainedConfig +from transformers import (BatchFeature, CLIPVisionConfig, PretrainedConfig, + ProcessorMixin) from vllm.attention import AttentionMetadata -from vllm.config import ModelConfig, VllmConfig -from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, DummyData, - InputContext, token_inputs) +from vllm.config import VllmConfig +from vllm.inputs import InputContext from vllm.logger import init_logger -from vllm.model_executor.layers.pooler import Pooler, PoolingType from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler from vllm.model_executor.layers.vocab_parallel_embedding import ( VocabParallelEmbedding) from vllm.model_executor.models.clip import CLIPVisionModel -from vllm.model_executor.models.llama import LlamaForCausalLM -from vllm.model_executor.pooling_metadata import PoolingMetadata from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY -from vllm.multimodal.inputs import NestedTensors, PlaceholderRange -from vllm.multimodal.utils import cached_get_tokenizer, repeat_and_pad_token -from vllm.sequence import IntermediateTensors, PoolerOutput +from vllm.multimodal.inputs import NestedTensors +from vllm.multimodal.processing import (BaseMultiModalProcessor, + MultiModalDataDict, + MultiModalDataItems, ProcessorInputs, + PromptReplacement) +from vllm.sequence import IntermediateTensors from vllm.utils import is_list_of -from .clip import dummy_image_for_clip, dummy_seq_data_for_clip +from .clip import dummy_image_for_clip from .interfaces import SupportsMultiModal, SupportsPP -from .utils import (AutoWeightsLoader, WeightsMapper, flatten_bn, maybe_prefix, +from .utils import (AutoWeightsLoader, WeightsMapper, flatten_bn, + init_vllm_registered_model, maybe_prefix, merge_multimodal_embeddings) logger = init_logger(__name__) @@ -305,238 +302,104 @@ def add_image_newline(self, image_features_hd): return image_features_hd_newline -# Based on https://huggingface.co/microsoft/Phi-3-vision-128k-instruct/blob/main/image_processing_phi3_v.py#L57 -def _calc_padded_size(*, width: int, height: int, padding_unit: int = 336): - target_height = int(np.ceil(height / padding_unit) * padding_unit) - top_padding = int((target_height - height) / 2) - bottom_padding = target_height - height - top_padding - padded_width = width - padded_height = height + top_padding + bottom_padding - return padded_width, padded_height - - -# Based on https://huggingface.co/microsoft/Phi-3-vision-128k-instruct/blob/main/image_processing_phi3_v.py#L90 -def _calc_hd_transform_size(*, width: int, height: int, hd_num: int): - transposed = False - if width < height: - width, height = height, width - transposed = True - - ratio = width / height - scale = 1 - while scale * np.ceil(scale / ratio) <= hd_num: - scale += 1 - scale -= 1 - - new_width = int(scale * 336) - new_height = int(new_width / ratio) - - padded_width, padded_height = _calc_padded_size(width=new_width, - height=new_height) - - if transposed: - padded_width, 
padded_height = padded_height, padded_width - - return padded_width, padded_height - - -# Based on https://huggingface.co/microsoft/Phi-3-vision-128k-instruct/blob/main/image_processing_phi3_v.py#L181 -def get_phi3v_image_feature_size( - hf_config: Dict[str, Any], - *, - input_height: int, - input_width: int, - num_crops: int, -) -> int: - if num_crops is None: - num_crops = hf_config.get("num_crops", 16) - new_width, new_height = _calc_hd_transform_size(width=input_width, - height=input_height, - hd_num=num_crops) - - return (new_height // 336 * new_width // 336 + 1) * 144 + 1 \ - + (new_height // 336 + 1) * 12 - - -def get_max_phi3v_image_tokens(ctx: InputContext, - *, - num_crops: Optional[int] = None): - - return get_phi3v_image_feature_size( - ctx.get_hf_image_processor_config(), - input_height=MAX_IMAGE_FEATURE_SIZE_HEIGHT, - input_width=MAX_IMAGE_FEATURE_SIZE_WIDTH, - num_crops=num_crops, - ) - - -def dummy_data_for_phi3v(ctx: InputContext, - seq_len: int, - mm_counts: Mapping[str, int], - *, - num_crops: Optional[int] = None): - num_images = mm_counts["image"] - - image_feature_size = get_max_phi3v_image_tokens(ctx, num_crops=num_crops) +def get_max_phi3v_image_tokens(ctx: InputContext) -> int: + processor = ctx.get_hf_processor() + image_processor = processor.image_processor # type: ignore - seq_data, ranges = dummy_seq_data_for_clip( - CLIP_VIT_LARGE_PATCH14_336_CONFIG, - seq_len, - num_images, - image_token_id=_IMAGE_TOKEN_ID, - image_feature_size_override=image_feature_size, + return image_processor.calc_num_image_tokens_from_image_size( + width=MAX_IMAGE_FEATURE_SIZE_WIDTH, + height=MAX_IMAGE_FEATURE_SIZE_HEIGHT, ) - mm_data = dummy_image_for_clip( - CLIP_VIT_LARGE_PATCH14_336_CONFIG, - num_images, - image_width_override=MAX_IMAGE_FEATURE_SIZE_WIDTH, - image_height_override=MAX_IMAGE_FEATURE_SIZE_HEIGHT, - ) - - return DummyData(seq_data, mm_data, ranges) - -@lru_cache -def _get_image_placeholder_token_id_candidates( - model_config: ModelConfig, - idx: int, -) -> List[List[int]]: - assert idx > 0 - tokenizer = cached_get_tokenizer(model_config.tokenizer) +class Phi3VMultiModalProcessor(BaseMultiModalProcessor): - # This is used when the image token is at the start of the string - start_candidate = tokenizer.encode(f"<|image_{idx}|>", - add_special_tokens=False) - - # This is used when the image token is in the middle of the string - # We need to get the token for "<", not "▁<" - # https://huggingface.co/microsoft/Phi-3-vision-128k-instruct/raw/main/tokenizer.json - a_token_id, = tokenizer.encode("a", add_special_tokens=False) - a_token_id_, *middle_candidate = tokenizer.encode(f"a<|image_{idx}|>", - add_special_tokens=False) - assert a_token_id == a_token_id_ - - return [start_candidate, middle_candidate] + def _get_hf_processor( + self, + *, + num_crops: Optional[int] = None, + ) -> ProcessorMixin: + if num_crops is not None: + return self.ctx.get_hf_processor(num_crops=num_crops) + return self.ctx.get_hf_processor() + + def _apply_hf_processor( + self, + prompt: str, + mm_data: MultiModalDataDict, + mm_processor_kwargs: Mapping[str, object], + ) -> BatchFeature: + processed_outputs = super()._apply_hf_processor( + prompt, mm_data, mm_processor_kwargs) + # Phi3v processor has inserted -1, -2 etc as placeholder in prompt_ids, + # which will cause OverflowError when decoding the prompt_ids. 
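+ # (Illustratively: the HF processor expands <|image_1|> into a run
+ # of -1s, <|image_2|> into -2s, and so on.)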
+ # Therefore, we need to do an early replacement here + token_ids = processed_outputs['input_ids'] + token_ids[token_ids < 0] = _IMAGE_TOKEN_ID + processed_outputs['input_ids'] = token_ids + return processed_outputs + + def _get_prompt_replacements( + self, + mm_items: MultiModalDataItems, + hf_inputs: BatchFeature, + mm_processor_kwargs: Mapping[str, object], + ) -> list[PromptReplacement]: + hf_processor = self._get_hf_processor() + image_tokens: list[str] = hf_processor.img_tokens # type: ignore + image_processor = hf_processor.image_processor # type: ignore + + mm_config = self.ctx.get_mm_config() + max_images = mm_config.limit_per_prompt.get("image", 1) + + def get_replacement_phi3v(item_idx: int): + image_size = mm_items.get_image_size(item_idx) + num_tokens = image_processor.calc_num_image_tokens_from_image_size( + width=image_size.width, + height=image_size.height, + ) + return [_IMAGE_TOKEN_ID] * num_tokens -def input_processor_for_phi3v(ctx: InputContext, - inputs: DecoderOnlyInputs, - *, - num_crops: Optional[int] = None): - multi_modal_data = inputs.get("multi_modal_data") - if multi_modal_data is None or "image" not in multi_modal_data: - return inputs + return [ + PromptReplacement( + modality="image", + target=image_token, + replacement=get_replacement_phi3v, + ) for image_token in image_tokens[:max_images] + ] - model_config = ctx.model_config - hf_config = ctx.get_hf_image_processor_config() + def _get_dummy_mm_inputs( + self, + mm_counts: Mapping[str, int], + ) -> ProcessorInputs: + num_images = mm_counts["image"] + + data = dummy_image_for_clip( + CLIP_VIT_LARGE_PATCH14_336_CONFIG, + num_images, + image_width_override=MAX_IMAGE_FEATURE_SIZE_WIDTH, + image_height_override=MAX_IMAGE_FEATURE_SIZE_HEIGHT, + ) - image_data = multi_modal_data["image"] - if isinstance(image_data, Image.Image): - w, h = image_data.size - image_feature_size = [ - get_phi3v_image_feature_size(hf_config, - input_width=w, - input_height=h, - num_crops=num_crops) - ] - image_data = [image_data] - elif is_list_of(image_data, Image.Image): - image_feature_size = [] - for image in image_data: - w, h = image.size - image_feature_size.append( - get_phi3v_image_feature_size(hf_config, - input_width=w, - input_height=h, - num_crops=num_crops)) - elif isinstance(image_data, torch.Tensor): - image_feature_size = [image_data.shape[0]] - image_data = [image_data] - elif is_list_of(image_data, torch.Tensor): - image_feature_size = [item.shape[0] for item in image_data] - else: - raise TypeError(f"Invalid image type: {type(image_data)}") - - prompt = inputs.get("prompt") - if prompt is None: - # for async server request, we assume prompt and its token_ids is always - # in correct format. And num_image_tags == len(image_data) always True. 
- image_idx = range(1, len(image_data) + 1) - new_prompt = None - else: - image_idx = sorted(map(int, re.findall(r"<\|image_(\d+)\|>+", prompt))) - if prompt.count("<|image|>") > 0: - logger.warning("Please follow the prompt format that is " - "documented on HuggingFace which does not involve " - "repeating <|image|> tokens.") - elif (num_image_tags := len(image_idx)) > 1: - assert num_image_tags == len( - image_data), "The count of image_placeholder not match image's" - new_prompt = prompt - - prompt_token_ids = inputs["prompt_token_ids"].copy() - - # masked placeholder with image token id - for idx in image_idx: - candidates = _get_image_placeholder_token_id_candidates(model_config, - idx=idx) - - for candidate in candidates: - for i in range(len(prompt_token_ids) - len(candidate) + 1): - if prompt_token_ids[i:i + len(candidate)] == candidate: - prompt_token_ids[i:i + - len(candidate)] = ([_IMAGE_TOKEN_ID] * - len(candidate)) - break - - # merge consecutive tag ids - merged_token_ids: List[int] = [] - for is_placeholder, token_ids in itertools.groupby( - prompt_token_ids, lambda x: x == _IMAGE_TOKEN_ID): - if is_placeholder: - merged_token_ids.append(_IMAGE_TOKEN_ID) - else: - merged_token_ids.extend(list(token_ids)) - - # TODO: Move this to utils or integrate with clip. - new_token_ids: List[int] = [] - placeholder_ranges: List[PlaceholderRange] = [] - placeholder_idx = 0 - while merged_token_ids: - token_id = merged_token_ids.pop(0) - if token_id == _IMAGE_TOKEN_ID: - replacement_ids = repeat_and_pad_token( - _IMAGE_TOKEN_ID, - repeat_count=image_feature_size[placeholder_idx], - ) - placeholder_ranges.append({ - "offset": len(new_token_ids), - "length": len(replacement_ids) - }) - new_token_ids.extend(replacement_ids) - placeholder_idx += 1 - else: - new_token_ids.append(token_id) + hf_processor = self._get_hf_processor() + image_tokens: list[str] = hf_processor.img_tokens # type: ignore - # NOTE: Create a defensive copy of the original inputs - return token_inputs(prompt_token_ids=new_token_ids, - prompt=new_prompt, - multi_modal_data=multi_modal_data, - multi_modal_placeholders={"image": placeholder_ranges}) + return ProcessorInputs( + prompt_text="".join(image_tokens[:num_images]), + mm_data=data, + mm_processor_kwargs={}, + ) -@MULTIMODAL_REGISTRY.register_image_input_mapper() @MULTIMODAL_REGISTRY.register_max_image_tokens(get_max_phi3v_image_tokens) -@INPUT_REGISTRY.register_dummy_data(dummy_data_for_phi3v) -@INPUT_REGISTRY.register_input_processor(input_processor_for_phi3v) +@MULTIMODAL_REGISTRY.register_processor(Phi3VMultiModalProcessor) class Phi3VForCausalLM(nn.Module, SupportsMultiModal, SupportsPP): def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() config = vllm_config.model_config.hf_config quant_config = vllm_config.quant_config - pooler_config = vllm_config.model_config.pooler_config multimodal_config = vllm_config.model_config.multimodal_config self.config = config self.multimodal_config = multimodal_config @@ -556,18 +419,17 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): quant_config, prefix=maybe_prefix(prefix, "model.vision_embed_tokens")) - # The prefix is empty intentionally because default prefix of - # LlamaForCausalLM is "model" - self.language_model = LlamaForCausalLM(vllm_config=vllm_config, - prefix="") - - # The same model class supports both language generation and embedding - # because the architecture name is the same - self._pooler = Pooler.from_config_with_defaults( - pooler_config, - 
pooling_type=PoolingType.LAST, - normalize=True, - softmax=False) + self.language_model = init_vllm_registered_model( + vllm_config=vllm_config, + # The prefix is empty intentionally because default prefix of + # LlamaForCausalLM is "model" + prefix="", + # We don't directly initialize vLLM's LlamaForCausalLM so we + # can automatically apply embedding wrapper if this model is + # initialized as an embedding model + architectures=["LlamaForCausalLM"], + ) + self.make_empty_intermediate_tensors = ( self.language_model.make_empty_intermediate_tensors) @@ -739,13 +601,6 @@ def sample( ) -> Optional[SamplerOutput]: return self.language_model.sample(logits, sampling_metadata) - def pooler( - self, - hidden_states: torch.Tensor, - pooling_metadata: PoolingMetadata, - ) -> Optional[PoolerOutput]: - return self._pooler(hidden_states, pooling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]) -> Set[str]: hf_to_vllm_mapper = WeightsMapper( diff --git a/vllm/model_executor/models/pixtral.py b/vllm/model_executor/models/pixtral.py index 45171c1a04b17..f05ea195e043d 100644 --- a/vllm/model_executor/models/pixtral.py +++ b/vllm/model_executor/models/pixtral.py @@ -1,6 +1,5 @@ from dataclasses import dataclass, fields from functools import cached_property -from itertools import tee from typing import Iterable, List, Mapping, Optional, Set, Tuple, Union import numpy @@ -48,6 +47,9 @@ except ImportError: USE_XFORMERS_OPS = False +PIXTRAL_IMAGE_BREAK_ID = 12 +PIXTRAL_IMAGE_END_ID = 13 + def get_max_pixtral_image_tokens(ctx: InputContext): tokenizer = cached_get_tokenizer( @@ -68,18 +70,17 @@ def dummy_data_for_pixtral(ctx: InputContext, seq_len: int, tokenizer_mode=ctx.model_config.tokenizer_mode) mm_encoder = tokenizer.mistral.instruct_tokenizer.mm_encoder - patch_size = mm_encoder.mm_config.image_patch_size image_token_id = mm_encoder.special_ids.img - mm_config = ctx.model_config.multimodal_config + mm_config = ctx.get_mm_config() num_images = mm_config.limit_per_prompt.get("image", 1) # dummy size size = 256 image = Image.new("RGB", (size, size), color=0) - image_feature_size = (size**2) // (patch_size**2) - + encoding = tokenizer.instruct.mm_encoder(ImageChunk(image=image)) + image_feature_size = len(encoding.tokens) num_image_tokens = image_feature_size * num_images seq_data = SequenceData.from_prompt_token_counts( (image_token_id, num_image_tokens), @@ -101,14 +102,13 @@ def input_mapper_for_pixtral(ctx: InputContext, Args: ctx: Context of the loaded model. - data: data potentially containing image/image embeddings to be mapped - to pixel_values in .forward() for a visual QWenLMHeadModel model. + data: data potentially containing PIL images to be processed + and mapped to `images`. Returns: MultiModalKwargs containing the stacked normalized images tensor or image embeddings. 
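+ When images are given, the kwargs also include ``image_tokens``: the
+ image token ids emitted by the mm encoder, flattened across all images.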
""" - # Early exit if we have provided an image to a language only Qwen model model_config = ctx.model_config tokenizer = cached_get_tokenizer( model_config.tokenizer, tokenizer_mode=model_config.tokenizer_mode) @@ -116,35 +116,67 @@ def input_mapper_for_pixtral(ctx: InputContext, data_list = data if isinstance(data, list) else [data] images = [] + image_tokens_list = [] for image_data in data_list: image = ImageChunk(image=image_data) encoding = tokenizer.instruct.mm_encoder(image) image = torch.from_numpy(encoding.image).to(device="cuda", dtype=torch.float16) images.append(image) + image_tokens_list.append(encoding.tokens) - return MultiModalKwargs({"images": images}) + image_tokens = torch.tensor([ + token_id for image_tokens in image_tokens_list + for token_id in image_tokens + ]) + return MultiModalKwargs({"images": images, "image_tokens": image_tokens}) def input_processor_for_pixtral(ctx: InputContext, inputs: DecoderOnlyInputs): multi_modal_data = inputs.get("multi_modal_data") - if multi_modal_data is not None and "image" in multi_modal_data: - tokenizer = cached_get_tokenizer( - ctx.model_config.tokenizer, - tokenizer_mode=ctx.model_config.tokenizer_mode) - - mm_encoder = tokenizer.mistral.instruct_tokenizer.mm_encoder - image_token_id = mm_encoder.special_ids.img + if multi_modal_data is None or "image" not in multi_modal_data: + return inputs - if image_token_id not in inputs['prompt_token_ids']: - raise ValueError( - f"You've passed {inputs=} without {image_token_id=}" - " Make sure to process your input via mistral_common's" - " tokenizer or pass a chat completion request. For more" - " For more info, see: " - "https://github.com/vllm-project/vllm/issues/8411.") + prompt_token_ids = inputs.get("prompt_token_ids") + prompt = inputs.get("prompt") + tokenizer = cached_get_tokenizer( + ctx.model_config.tokenizer, + tokenizer_mode=ctx.model_config.tokenizer_mode) - return inputs + mm_encoder = tokenizer.mistral.instruct_tokenizer.mm_encoder + image_token_id = mm_encoder.special_ids.img + image_break_id = mm_encoder.special_ids.img_break + image_end_id = mm_encoder.special_ids.img_end + + if image_token_id not in inputs['prompt_token_ids']: + raise ValueError( + f"You've passed {inputs=} without {image_token_id=}" + " Make sure to process your input via mistral_common's" + " tokenizer or pass a chat completion request. 
For more" + " For more info, see: " + "https://github.com/vllm-project/vllm/issues/8411.") + + # Get precise tracking of placeholder positions + placeholder_ranges = [] + curr_offset = -1 + curr_length = 0 + for i in range(len(prompt_token_ids)): + if prompt_token_ids[i] in (image_token_id, image_break_id): + if curr_offset < 0: + curr_offset = i + curr_length += 1 + elif prompt_token_ids[i] == image_end_id: + curr_length += 1 + placeholder_ranges.append( + PlaceholderRange(offset=curr_offset, length=curr_length)) + curr_offset = -1 + curr_length = 0 + else: + pass + return token_inputs(prompt=prompt, + prompt_token_ids=prompt_token_ids, + multi_modal_data=multi_modal_data, + multi_modal_placeholders={"image": placeholder_ranges}) @MULTIMODAL_REGISTRY.register_image_input_mapper(input_mapper_for_pixtral) @@ -172,9 +204,10 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): # init MistralForCausalLM self.language_model = init_vllm_registered_model( - config.text_config, vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "language_model")) + hf_config=config.text_config, + prefix=maybe_prefix(prefix, "language_model"), + ) self.vision_encoder = VisionTransformer(self.vision_args) self.vision_language_adapter = VisionLanguageAdapter( @@ -191,11 +224,34 @@ def sampler(self): return get_sampler() def get_multimodal_embeddings(self, **kwargs) -> Optional[NestedTensors]: - image_input = self._parse_and_validate_image_input(**kwargs) + image_input, image_tokens = self._parse_and_validate_image_input( + **kwargs) if image_input is None: return None + vision_embeddings = self._process_image_input(image_input) - return vision_embeddings + + # NOTE: We patch the outputs of the vision encoder with embeddings + # from `[IMG_BREAK]` and `[IMG_END]` tokens. + image_embeds = self.language_model.get_input_embeddings(image_tokens) + image_token_mask = image_tokens == self.vision_args.image_token_id + image_embeds[image_token_mask] = vision_embeddings + + # NOTE: Image embeddings are split into separate tensors for each image + # by the indices of `[IMG_END]` token. 
+ split_indices = torch.where( + image_tokens == PIXTRAL_IMAGE_END_ID)[0] + 1 + if len(split_indices) <= 1: + # Do not split, return as tensor of shape [1, fs, hs] + return image_embeds.unsqueeze(0) + + # If the last split index is the last index in image_tokens, we + # ignore it to avoid empty split tensor + if split_indices[-1] == len(image_tokens): + split_indices = split_indices[:-1] + + image_embeds = image_embeds.tensor_split(split_indices.cpu()) + return image_embeds def get_input_embeddings( self, @@ -205,8 +261,10 @@ def get_input_embeddings( inputs_embeds = self.language_model.get_input_embeddings(input_ids) if multimodal_embeddings is not None: inputs_embeds = merge_multimodal_embeddings( - input_ids, inputs_embeds, multimodal_embeddings, - self.vision_args.image_token_id) + input_ids, inputs_embeds, multimodal_embeddings, [ + self.vision_args.image_token_id, PIXTRAL_IMAGE_END_ID, + PIXTRAL_IMAGE_BREAK_ID + ]) return inputs_embeds def forward( @@ -244,10 +302,11 @@ def forward( def _parse_and_validate_image_input( self, images: Optional[Union[List[List[torch.Tensor]], List[torch.Tensor], - torch.Tensor]] = None + torch.Tensor]] = None, + image_tokens: Optional[torch.Tensor] = None, ) -> Optional[List[torch.Tensor]]: if images is None: - return None + return None, None if isinstance(images, torch.Tensor): # if passed as batch take all images @@ -266,7 +325,16 @@ def _parse_and_validate_image_input( images = flatten_images - return images + if isinstance(image_tokens, torch.Tensor): + # image_tokens are batched + image_tokens = image_tokens.flatten() + elif isinstance(image_tokens, list): + # image_tokens are of different lengths thus passed as a list + image_tokens = torch.cat(image_tokens) + + assert image_tokens.dim() == 1 + + return images, image_tokens def _process_image_input(self, image_input: List[torch.Tensor]) -> torch.Tensor: @@ -295,38 +363,33 @@ def is_vision_encoder_weights(weight: Tuple[str, torch.Tensor]): def is_vision_lang_adapter_weights(weight: Tuple[str, torch.Tensor]): return weight[0].startswith("vision_language_adapter") - def is_vision_weights(weight: Tuple[str, torch.Tensor]): - return is_vision_encoder_weights( - weight) or is_vision_lang_adapter_weights(weight) - - llm_weights, vision_encoder_weights, vision_lang_adapter_weights = tee( - weights, 3) - - # llm - llm_weights = filter(lambda x: not is_vision_weights(x), llm_weights) - self.language_model.load_weights(llm_weights) - - # vision encoder - vision_encoder_weights = filter(is_vision_encoder_weights, - vision_encoder_weights) + # Get references to parameters for direct loading vision_encoder_dict = dict(self.vision_encoder.named_parameters()) - for name, loaded_weight in vision_encoder_weights: - # cut 'vision_encoder.' - name = '.'.join(name.split(".")[1:]) - param = vision_encoder_dict[name] - - default_weight_loader(param, loaded_weight) - - # adapter - vision_lang_adapter_weights = filter(is_vision_lang_adapter_weights, - vision_lang_adapter_weights) - vision_lang_adpter_dict = dict( + vision_lang_adapter_dict = dict( self.vision_language_adapter.named_parameters()) - for name, loaded_weight in vision_lang_adapter_weights: - # cut 'vision_language_adapter.' 
- name = '.'.join(name.split(".")[1:]) - param = vision_lang_adpter_dict[name] - default_weight_loader(param, loaded_weight) + + def llm_weights_generator(): + # Single pass over weights + for name, w in weights: + if is_vision_encoder_weights((name, w)): + # Load vision encoder weights directly + trimmed_name = '.'.join(name.split(".")[1:]) + param = vision_encoder_dict[trimmed_name] + with torch.no_grad(): + default_weight_loader(param, w) + elif is_vision_lang_adapter_weights((name, w)): + # Load vision-language adapter weights directly + trimmed_name = '.'.join(name.split(".")[1:]) + param = vision_lang_adapter_dict[trimmed_name] + with torch.no_grad(): + default_weight_loader(param, w) + else: + # LLM weights: yield them to be loaded + # by language_model.load_weights + yield (name, w) + + # Now we call the language model load with the generator + self.language_model.load_weights(llm_weights_generator()) # Vision encoder diff --git a/vllm/model_executor/models/qwen2.py b/vllm/model_executor/models/qwen2.py index 9f706610a129a..3ce4eb5869f21 100644 --- a/vllm/model_executor/models/qwen2.py +++ b/vllm/model_executor/models/qwen2.py @@ -31,6 +31,7 @@ from vllm.compilation.decorators import support_torch_compile from vllm.config import CacheConfig, VllmConfig from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size +from vllm.logger import init_logger from vllm.model_executor.layers.activation import SiluAndMul from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.linear import (MergedColumnParallelLinear, @@ -55,6 +56,8 @@ make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) +logger = init_logger(__name__) + class Qwen2MLP(nn.Module): @@ -433,7 +436,6 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config = vllm_config.model_config.hf_config quant_config = vllm_config.quant_config lora_config = vllm_config.lora_config - pooler_config = vllm_config.model_config.pooler_config self.config = config self.lora_config = lora_config @@ -442,26 +444,21 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.model = Qwen2Model(vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")) - if config.tie_word_embeddings: - self.lm_head = self.model.embed_tokens + if get_pp_group().is_last_rank: + if config.tie_word_embeddings: + self.lm_head = self.model.embed_tokens + else: + self.lm_head = ParallelLMHead(config.vocab_size, + config.hidden_size, + quant_config=quant_config, + prefix=maybe_prefix( + prefix, "lm_head")) else: - self.lm_head = ParallelLMHead(config.vocab_size, - config.hidden_size, - quant_config=quant_config, - prefix=maybe_prefix( - prefix, "lm_head")) + self.lm_head = PPMissingLayer() self.logits_processor = LogitsProcessor(config.vocab_size) self.sampler = get_sampler() - # The same model class supports both language generation and embedding - # because the architecture name is the same - self._pooler = Pooler.from_config_with_defaults( - pooler_config, - pooling_type=PoolingType.LAST, - normalize=True, - softmax=False) - self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) @@ -499,13 +496,6 @@ def sample( next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - def pooler( - self, - hidden_states: torch.Tensor, - pooling_metadata: PoolingMetadata, - ) -> Optional[PoolerOutput]: - return self._pooler(hidden_states, pooling_metadata) - def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]) -> Set[str]: 
loader = AutoWeightsLoader( @@ -553,6 +543,15 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.model = Qwen2Model(vllm_config=vllm_config, prefix=maybe_prefix(prefix, "model")) + # TODO: Replace this model class with for_embedding(Qwen2ForCausalLM), + # after changing the default pooling method + if pooler_config.pooling_type is None: + logger.warning( + "This embedding model will default to last-token pooling in " + "an upcoming version. To avoid breaking changes, you should " + "pass `--override-pooler-config '{\"pooling_type\": \"MEAN\"}'`" + " explicitly.") + self._pooler = Pooler.from_config_with_defaults( pooler_config, pooling_type=PoolingType.MEAN, @@ -580,4 +579,6 @@ def pooler( def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]): hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={"model.": ""}) weights = hf_to_vllm_mapper.apply(weights) + weights = ((name, data) for name, data in weights + if not name.startswith("lm_head.")) self.model.load_weights(weights) diff --git a/vllm/model_executor/models/qwen2_audio.py b/vllm/model_executor/models/qwen2_audio.py index a0605fee82aca..48a2d470414b9 100644 --- a/vllm/model_executor/models/qwen2_audio.py +++ b/vllm/model_executor/models/qwen2_audio.py @@ -19,7 +19,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only Qwen2-Audio model compatible with HuggingFace weights.""" -from functools import lru_cache +from functools import cached_property, lru_cache from typing import (Iterable, List, Mapping, Optional, Set, Tuple, TypedDict, Union) @@ -34,12 +34,7 @@ from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, DummyData, InputContext, token_inputs) from vllm.logger import init_logger -from vllm.model_executor.layers.logits_processor import LogitsProcessor from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler -from vllm.model_executor.layers.vocab_parallel_embedding import ParallelLMHead -from vllm.model_executor.model_loader.weight_utils import ( - default_weight_loader, maybe_remap_kv_scale_name) -from vllm.model_executor.models.qwen2 import Qwen2Model from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs from vllm.multimodal.inputs import NestedTensors @@ -47,15 +42,11 @@ from vllm.sequence import IntermediateTensors, SequenceData from .interfaces import SupportsMultiModal, SupportsPP -from .utils import merge_multimodal_embeddings +from .utils import (AutoWeightsLoader, init_vllm_registered_model, + maybe_prefix, merge_multimodal_embeddings) logger = init_logger(__name__) -_KEYS_TO_MODIFY_MAPPING = { - "language_model.lm_head": "lm_head", - "language_model.model": "language_model", -} - # # === Audio Inputs === # class Qwen2AudioInputs(TypedDict): @@ -281,25 +272,23 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.quant_config = quant_config - self.language_model = Qwen2Model( - vllm_config=vllm_config.with_hf_config(config.text_config), - prefix=prefix) - self.unpadded_vocab_size = config.text_config.vocab_size - if config.text_config.tie_word_embeddings: - self.lm_head = self.language_model.embed_tokens - else: - self.lm_head = ParallelLMHead(config.text_config.vocab_size, - config.text_config.hidden_size, - quant_config=quant_config) - logit_scale = getattr(config, "logit_scale", 1.0) - self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, - config.text_config.vocab_size, - 
logit_scale) - self.sampler = get_sampler() + self.language_model = init_vllm_registered_model( + vllm_config=vllm_config, + hf_config=config.text_config, + prefix=maybe_prefix(prefix, "language_model"), + architectures=["Qwen2ForCausalLM"], + ) self.make_empty_intermediate_tensors = ( self.language_model.make_empty_intermediate_tensors) + @cached_property + def sampler(self): + if hasattr(self.language_model, "sampler"): + return self.language_model.sampler + + return get_sampler() + def _validate_and_reshape_mm_tensor(self, mm_input: Union[torch.Tensor, List[torch.Tensor]], @@ -414,72 +403,30 @@ def forward( multimodal_embeddings) input_ids = None - hidden_states = self.language_model(input_ids, - positions, - kv_caches, - attn_metadata, - intermediate_tensors, - inputs_embeds=inputs_embeds) + hidden_states = self.language_model.model(input_ids, + positions, + kv_caches, + attn_metadata, + intermediate_tensors, + inputs_embeds=inputs_embeds) return hidden_states - def compute_logits(self, hidden_states: torch.Tensor, - sampling_metadata: SamplingMetadata) -> torch.Tensor: - logits = self.logits_processor(self.lm_head, hidden_states, - sampling_metadata) - return logits + def compute_logits( + self, + hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[torch.Tensor]: + return self.language_model.compute_logits(hidden_states, + sampling_metadata) def sample( self, logits: torch.Tensor, sampling_metadata: SamplingMetadata, ) -> Optional[SamplerOutput]: - next_tokens = self.sampler(logits, sampling_metadata) - return next_tokens + return self.language_model.sample(logits, sampling_metadata) def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]) -> Set[str]: - stacked_params_mapping = [ - # (param_name, shard_name, shard_id) - ("qkv_proj", "q_proj", "q"), - ("qkv_proj", "k_proj", "k"), - ("qkv_proj", "v_proj", "v"), - ("gate_up_proj", "gate_proj", 0), - ("gate_up_proj", "up_proj", 1), - ] - params_dict = dict(self.named_parameters(remove_duplicate=False)) - loaded_params: Set[str] = set() - for name, loaded_weight in weights: - if "rotary_emb.inv_freq" in name: - continue - if (self.config.text_config.tie_word_embeddings - and "lm_head.weight" in name): - continue - for key_to_modify, new_key in _KEYS_TO_MODIFY_MAPPING.items(): - if key_to_modify in name: - name = name.replace(key_to_modify, new_key) - for (param_name, weight_name, shard_id) in stacked_params_mapping: - if weight_name not in name or 'audio' in name: - continue - name = name.replace(weight_name, param_name) - # Skip loading extra bias for GPTQ models. - if name.endswith(".bias") and name not in params_dict: - continue - param = params_dict[name] - weight_loader = param.weight_loader - weight_loader(param, loaded_weight, shard_id) - break - else: - # Skip loading extra bias for GPTQ models. - if name.endswith(".bias") and name not in params_dict: - continue - # Remapping the name of FP8 kv-scale. 
- name = maybe_remap_kv_scale_name(name, params_dict) - if name is None: - continue - - param = params_dict[name] - weight_loader = getattr(param, "weight_loader", - default_weight_loader) - weight_loader(param, loaded_weight) - loaded_params.add(name) - return loaded_params + loader = AutoWeightsLoader(self) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py index 7956a98b21569..cfc90cdab01e4 100644 --- a/vllm/model_executor/models/qwen2_vl.py +++ b/vllm/model_executor/models/qwen2_vl.py @@ -21,7 +21,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only Qwen2-VL model compatible with HuggingFace weights.""" -from functools import partial +from functools import cached_property, partial from typing import (Any, Callable, Dict, Iterable, List, Literal, Mapping, Optional, Set, Tuple, Type, TypedDict, Union) @@ -40,7 +40,7 @@ from vllm.attention import AttentionMetadata from vllm.config import VllmConfig -from vllm.distributed import get_pp_group, parallel_state +from vllm.distributed import parallel_state from vllm.distributed import utils as dist_utils from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, DummyData, InputContext, token_inputs) @@ -49,31 +49,25 @@ from vllm.model_executor.layers.activation import QuickGELU from vllm.model_executor.layers.linear import (ColumnParallelLinear, RowParallelLinear) -from vllm.model_executor.layers.logits_processor import LogitsProcessor -from vllm.model_executor.layers.pooler import Pooler, PoolingType from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.quantization.gptq import GPTQConfig from vllm.model_executor.layers.quantization.gptq_marlin import ( GPTQMarlinConfig) from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler -from vllm.model_executor.layers.vocab_parallel_embedding import ParallelLMHead from vllm.model_executor.model_loader.weight_utils import default_weight_loader -from vllm.model_executor.models.qwen2 import Qwen2Model -from vllm.model_executor.pooling_metadata import PoolingMetadata from vllm.multimodal import MULTIMODAL_REGISTRY from vllm.multimodal.image import cached_get_image_processor from vllm.multimodal.inputs import (MultiModalData, MultiModalDataDict, MultiModalKwargs, NestedTensors) from vllm.multimodal.utils import cached_get_tokenizer from vllm.platforms import _Backend -from vllm.sequence import IntermediateTensors, PoolerOutput, SequenceData +from vllm.sequence import IntermediateTensors, SequenceData from vllm.transformers_utils.config import uses_mrope from vllm.transformers_utils.processor import cached_get_processor from .interfaces import SupportsLoRA, SupportsMultiModal, SupportsPP -from .utils import (PPMissingLayer, get_vit_attn_backend, - is_pp_missing_parameter, - make_empty_intermediate_tensors_factory, maybe_prefix) +from .utils import (AutoWeightsLoader, WeightsMapper, get_vit_attn_backend, + init_vllm_registered_model, maybe_prefix) logger = init_logger(__name__) @@ -508,6 +502,8 @@ def __init__( mlp_ratio: float = vision_config.mlp_ratio self.spatial_merge_size = spatial_merge_size + self.num_heads = num_heads + self.embed_dim = embed_dim self.patch_embed = Qwen2VisionPatchEmbed( patch_size=patch_size, @@ -597,6 +593,53 @@ def forward( x = self.merger(x) return x + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: + stacked_params_mapping = [ + 
# (param_name, shard_name, shard_id) + ("qkv_proj", "q_proj", "q"), + ("qkv_proj", "k_proj", "k"), + ("qkv_proj", "v_proj", "v"), + ] + params_dict = dict(self.named_parameters(remove_duplicate=False)) + loaded_params: Set[str] = set() + + for name, loaded_weight in weights: + for (param_name, weight_name, shard_id) in stacked_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + if name.endswith("qkv.weight"): + visual_num_heads = self.num_heads + visual_embed_dim = self.embed_dim + head_size = visual_embed_dim // visual_num_heads + loaded_weight = loaded_weight.view(3, visual_num_heads, + head_size, + visual_embed_dim) + loaded_weight = loaded_weight.transpose(0, 1) + loaded_weight = loaded_weight.reshape(-1, visual_embed_dim) + elif name.endswith("qkv.bias"): + visual_num_heads = self.num_heads + visual_embed_dim = self.embed_dim + head_size = visual_embed_dim // visual_num_heads + loaded_weight = loaded_weight.view(3, visual_num_heads, + head_size) + loaded_weight = loaded_weight.transpose(0, 1) + loaded_weight = loaded_weight.reshape(-1) + + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params + # === Vision input helpers === # @@ -1070,7 +1113,6 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config = vllm_config.model_config.hf_config cache_config = vllm_config.cache_config quant_config = vllm_config.quant_config - pooler_config = vllm_config.model_config.pooler_config multimodal_config = vllm_config.model_config.multimodal_config assert not cache_config.enable_prefix_caching, \ "Qwen2-VL currently does not support prefix caching" @@ -1085,31 +1127,21 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): prefix=maybe_prefix(prefix, "visual"), ) - self.model = Qwen2Model(vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "model")) + self.language_model = init_vllm_registered_model( + vllm_config=vllm_config, + prefix=maybe_prefix(prefix, "language_model"), + architectures=["Qwen2ForCausalLM"], + ) - if get_pp_group().is_last_rank: - if config.tie_word_embeddings: - self.lm_head = self.model.embed_tokens - else: - self.lm_head = ParallelLMHead(config.vocab_size, - config.hidden_size, - quant_config=quant_config, - prefix=maybe_prefix( - prefix, "lm_head")) - else: - self.lm_head = PPMissingLayer() - - self.logits_processor = LogitsProcessor(config.vocab_size) - self.sampler = get_sampler() - self._pooler = Pooler.from_config_with_defaults( - pooler_config, - pooling_type=PoolingType.LAST, - normalize=True, - softmax=False) self.make_empty_intermediate_tensors = ( - make_empty_intermediate_tensors_factory( - ["hidden_states", "residual"], config.hidden_size)) + self.language_model.make_empty_intermediate_tensors) + + @cached_property + def sampler(self): + if hasattr(self.language_model, "sampler"): + return self.language_model.sampler + + return get_sampler() def _maybe_ignore_quant_config(self, quant_config: QuantizationConfig): # GPTQ configs do not have a list of ignored modules, however AutoGPTQ @@ -1268,7 +1300,7 @@ def get_input_embeddings( multimodal_embeddings: Optional[List[Tuple[NestedTensors, str]]] = None, ) -> torch.Tensor: - inputs_embeds = self.model.get_input_embeddings(input_ids) + inputs_embeds = 
self.language_model.get_input_embeddings(input_ids) if multimodal_embeddings is not None: for embeddings, modality in multimodal_embeddings: if modality == "image": @@ -1337,7 +1369,7 @@ def forward( multimodal_embeddings) input_ids = None - hidden_states = self.model( + hidden_states = self.language_model.model( input_ids=input_ids, positions=positions, kv_caches=kv_caches, @@ -1347,87 +1379,28 @@ def forward( ) return hidden_states - def compute_logits(self, hidden_states: torch.Tensor, - sampling_metadata: SamplingMetadata) -> torch.Tensor: - logits = self.logits_processor(self.lm_head, hidden_states, - sampling_metadata) - return logits + def compute_logits( + self, + hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[torch.Tensor]: + return self.language_model.compute_logits(hidden_states, + sampling_metadata) def sample( self, logits: torch.Tensor, sampling_metadata: SamplingMetadata, ) -> Optional[SamplerOutput]: - next_tokens = self.sampler(logits, sampling_metadata) - return next_tokens - - def pooler( - self, - hidden_states: torch.Tensor, - pooling_metadata: PoolingMetadata, - ) -> Optional[PoolerOutput]: - return self._pooler(hidden_states, pooling_metadata) + return self.language_model.sample(logits, sampling_metadata) def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]) -> Set[str]: - stacked_params_mapping = [ - # (param_name, shard_name, shard_id) - ("qkv_proj", "q_proj", "q"), - ("qkv_proj", "k_proj", "k"), - ("qkv_proj", "v_proj", "v"), - ("gate_up_proj", "up_proj", 1), - ("gate_up_proj", "gate_proj", 0), - ] - params_dict = dict(self.named_parameters(remove_duplicate=False)) - loaded_params: Set[str] = set() - for name, loaded_weight in weights: - if "rotary_emb.inv_freq" in name: - continue - if self.config.tie_word_embeddings and "lm_head.weight" in name: - continue - for (param_name, weight_name, shard_id) in stacked_params_mapping: - if weight_name not in name: - continue - name = name.replace(weight_name, param_name) - # Skip loading extra bias for GPTQ models. - if name.endswith(".bias") and name not in params_dict: - continue - if is_pp_missing_parameter(name, self): - continue - param = params_dict[name] - weight_loader = param.weight_loader - weight_loader(param, loaded_weight, shard_id) - break - else: - if "visual" in name and name.endswith("qkv.weight"): - visual_num_heads = self.config.vision_config.num_heads - visual_embed_dim = self.config.vision_config.embed_dim - head_size = visual_embed_dim // visual_num_heads - loaded_weight = loaded_weight.view(3, visual_num_heads, - head_size, - visual_embed_dim) - loaded_weight = loaded_weight.transpose(0, 1) - loaded_weight = loaded_weight.reshape(-1, visual_embed_dim) - elif "visual" in name and name.endswith("qkv.bias"): - visual_num_heads = self.config.vision_config.num_heads - visual_embed_dim = self.config.vision_config.embed_dim - head_size = visual_embed_dim // visual_num_heads - loaded_weight = loaded_weight.view(3, visual_num_heads, - head_size) - loaded_weight = loaded_weight.transpose(0, 1) - loaded_weight = loaded_weight.reshape(-1) - try: - # Skip loading extra bias for GPTQ models. 
- if name.endswith(".bias") and name not in params_dict: - continue - if is_pp_missing_parameter(name, self): - continue - param = params_dict[name] - except KeyError: - raise ValueError(f"Unexpected weight: {name}") from None - - weight_loader = getattr(param, "weight_loader", - default_weight_loader) - weight_loader(param, loaded_weight) - loaded_params.add(name) - return loaded_params + hf_to_vllm_mapper = WeightsMapper( + orig_to_new_prefix={ + "lm_head.": "language_model.lm_head.", + "model.": "language_model.model.", + }) + + loader = AutoWeightsLoader(self) + return loader.load_weights(weights, mapper=hf_to_vllm_mapper) diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index 4462f6ed55a9c..c16ca99526fa9 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -20,10 +20,11 @@ from vllm.logger import init_logger from vllm.platforms import current_platform -from .interfaces import (has_inner_state, is_attention_free, +from .adapters import as_embedding_model +from .interfaces import (has_inner_state, is_attention_free, is_hybrid, supports_cross_encoding, supports_multimodal, supports_pp) -from .interfaces_base import is_embedding_model, is_text_generation_model +from .interfaces_base import is_pooling_model, is_text_generation_model logger = init_logger(__name__) @@ -40,6 +41,7 @@ "BloomForCausalLM": ("bloom", "BloomForCausalLM"), # ChatGLMModel supports multimodal "CohereForCausalLM": ("commandr", "CohereForCausalLM"), + "Cohere2ForCausalLM": ("commandr", "CohereForCausalLM"), "DbrxForCausalLM": ("dbrx", "DbrxForCausalLM"), "DeciLMForCausalLM": ("decilm", "DeciLMForCausalLM"), "DeepseekForCausalLM": ("deepseek", "DeepseekForCausalLM"), @@ -48,12 +50,14 @@ "FalconForCausalLM": ("falcon", "FalconForCausalLM"), "GemmaForCausalLM": ("gemma", "GemmaForCausalLM"), "Gemma2ForCausalLM": ("gemma2", "Gemma2ForCausalLM"), + "GlmForCausalLM": ("glm", "GlmForCausalLM"), "GPT2LMHeadModel": ("gpt2", "GPT2LMHeadModel"), "GPTBigCodeForCausalLM": ("gpt_bigcode", "GPTBigCodeForCausalLM"), "GPTJForCausalLM": ("gpt_j", "GPTJForCausalLM"), "GPTNeoXForCausalLM": ("gpt_neox", "GPTNeoXForCausalLM"), "GraniteForCausalLM": ("granite", "GraniteForCausalLM"), "GraniteMoeForCausalLM": ("granitemoe", "GraniteMoeForCausalLM"), + "GritLM": ("gritlm", "GritLM"), "InternLMForCausalLM": ("llama", "LlamaForCausalLM"), "InternLM2ForCausalLM": ("internlm2", "InternLM2ForCausalLM"), "InternLM2VEForCausalLM": ("internlm2_ve", "InternLM2VEForCausalLM"), @@ -92,7 +96,7 @@ "Starcoder2ForCausalLM": ("starcoder2", "Starcoder2ForCausalLM"), "SolarForCausalLM": ("solar", "SolarForCausalLM"), "TeleChat2ForCausalLM": ("telechat2", "TeleChat2ForCausalLM"), - "XverseForCausalLM": ("xverse", "XverseForCausalLM"), + "XverseForCausalLM": ("llama", "LlamaForCausalLM"), # [Encoder-decoder] "BartModel": ("bart", "BartForConditionalGeneration"), "BartForConditionalGeneration": ("bart", "BartForConditionalGeneration"), @@ -106,14 +110,16 @@ "RobertaForMaskedLM": ("roberta", "RobertaEmbeddingModel"), "XLMRobertaModel": ("roberta", "RobertaEmbeddingModel"), "DeciLMForCausalLM": ("decilm", "DeciLMForCausalLM"), - "Gemma2Model": ("gemma2", "Gemma2EmbeddingModel"), - "LlamaModel": ("llama", "LlamaEmbeddingModel"), + "Gemma2Model": ("gemma2", "Gemma2ForCausalLM"), + "GlmForCausalLM": ("glm", "GlmForCausalLM"), + "GritLM": ("gritlm", "GritLM"), + "LlamaModel": ("llama", "LlamaForCausalLM"), **{ # Multiple models share the same architecture, so we include them all 
k: (mod, arch) for k, (mod, arch) in _TEXT_GENERATION_MODELS.items() if arch == "LlamaForCausalLM" }, - "MistralModel": ("llama", "LlamaEmbeddingModel"), + "MistralModel": ("llama", "LlamaForCausalLM"), "Phi3ForCausalLM": ("phi3", "Phi3ForCausalLM"), "Qwen2Model": ("qwen2", "Qwen2EmbeddingModel"), "Qwen2ForCausalLM": ("qwen2", "Qwen2ForCausalLM"), @@ -123,7 +129,7 @@ # [Multimodal] "LlavaNextForConditionalGeneration": ("llava_next", "LlavaNextForConditionalGeneration"), # noqa: E501 "Phi3VForCausalLM": ("phi3v", "Phi3VForCausalLM"), - "Qwen2VLForConditionalGeneration": ("qwen2_vl", "Qwen2VLForConditionalGeneration") # noqa: E501, + "Qwen2VLForConditionalGeneration": ("qwen2_vl", "Qwen2VLForConditionalGeneration"), # noqa: E501 } _CROSS_ENCODER_MODELS = { @@ -149,6 +155,7 @@ "LlavaNextForConditionalGeneration": ("llava_next", "LlavaNextForConditionalGeneration"), # noqa: E501 "LlavaNextVideoForConditionalGeneration": ("llava_next_video", "LlavaNextVideoForConditionalGeneration"), # noqa: E501 "LlavaOnevisionForConditionalGeneration": ("llava_onevision", "LlavaOnevisionForConditionalGeneration"), # noqa: E501 + "MantisForConditionalGeneration": ("llava", "MantisForConditionalGeneration"), # noqa: E501 "MiniCPMV": ("minicpmv", "MiniCPMV"), "MolmoForCausalLM": ("molmo", "MolmoForCausalLM"), "NVLM_D": ("nvlm_d", "NVLM_D_Model"), @@ -176,6 +183,7 @@ **_CROSS_ENCODER_MODELS, **_MULTIMODAL_MODELS, **_SPECULATIVE_DECODING_MODELS, + "TransformersModel": ("transformers", "TransformersModel"), } # Models not supported by ROCm. @@ -206,24 +214,37 @@ @dataclass(frozen=True) class _ModelInfo: + architecture: str is_text_generation_model: bool - is_embedding_model: bool + is_pooling_model: bool supports_cross_encoding: bool supports_multimodal: bool supports_pp: bool has_inner_state: bool is_attention_free: bool + is_hybrid: bool @staticmethod def from_model_cls(model: Type[nn.Module]) -> "_ModelInfo": + is_pooling_model_ = is_pooling_model(model) + if not is_pooling_model_: + try: + as_embedding_model(model) + except Exception: + pass + else: + is_pooling_model_ = True + return _ModelInfo( + architecture=model.__name__, is_text_generation_model=is_text_generation_model(model), - is_embedding_model=is_embedding_model(model), + is_pooling_model=is_pooling_model_, supports_cross_encoding=supports_cross_encoding(model), supports_multimodal=supports_multimodal(model), supports_pp=supports_pp(model), has_inner_state=has_inner_state(model), is_attention_free=is_attention_free(model), + is_hybrid=is_hybrid(model), ) @@ -397,13 +418,13 @@ def _normalize_archs( def inspect_model_cls( self, architectures: Union[str, List[str]], - ) -> _ModelInfo: + ) -> Tuple[_ModelInfo, str]: architectures = self._normalize_archs(architectures) for arch in architectures: model_info = self._try_inspect_model_cls(arch) if model_info is not None: - return model_info + return (model_info, arch) return self._raise_for_unsupported(architectures) @@ -424,39 +445,57 @@ def is_text_generation_model( self, architectures: Union[str, List[str]], ) -> bool: - return self.inspect_model_cls(architectures).is_text_generation_model + model_cls, _ = self.inspect_model_cls(architectures) + return model_cls.is_text_generation_model - def is_embedding_model( + def is_pooling_model( self, architectures: Union[str, List[str]], ) -> bool: - return self.inspect_model_cls(architectures).is_embedding_model + model_cls, _ = self.inspect_model_cls(architectures) + return model_cls.is_pooling_model def is_cross_encoder_model( self, architectures: Union[str, 
List[str]], ) -> bool: - return self.inspect_model_cls(architectures).supports_cross_encoding + model_cls, _ = self.inspect_model_cls(architectures) + return model_cls.supports_cross_encoding def is_multimodal_model( self, architectures: Union[str, List[str]], ) -> bool: - return self.inspect_model_cls(architectures).supports_multimodal + model_cls, _ = self.inspect_model_cls(architectures) + return model_cls.supports_multimodal def is_pp_supported_model( self, architectures: Union[str, List[str]], ) -> bool: - return self.inspect_model_cls(architectures).supports_pp + model_cls, _ = self.inspect_model_cls(architectures) + return model_cls.supports_pp + + def model_has_inner_state( + self, + architectures: Union[str, List[str]], + ) -> bool: + model_cls, _ = self.inspect_model_cls(architectures) + return model_cls.has_inner_state - def model_has_inner_state(self, architectures: Union[str, - List[str]]) -> bool: - return self.inspect_model_cls(architectures).has_inner_state + def is_attention_free_model( + self, + architectures: Union[str, List[str]], + ) -> bool: + model_cls, _ = self.inspect_model_cls(architectures) + return model_cls.is_attention_free - def is_attention_free_model(self, architectures: Union[str, - List[str]]) -> bool: - return self.inspect_model_cls(architectures).is_attention_free + def is_hybrid_model( + self, + architectures: Union[str, List[str]], + ) -> bool: + model_cls, _ = self.inspect_model_cls(architectures) + return model_cls.is_hybrid ModelRegistry = _ModelRegistry({ diff --git a/vllm/model_executor/models/siglip.py b/vllm/model_executor/models/siglip.py index deaed0ba7e4ce..6fb9e2cc4584f 100644 --- a/vllm/model_executor/models/siglip.py +++ b/vllm/model_executor/models/siglip.py @@ -6,12 +6,11 @@ import numpy as np import torch -import torch.nn.functional as F from PIL import Image from torch import nn from transformers import SiglipVisionConfig -from vllm.attention.selector import _Backend +from vllm.attention.layer import MultiHeadAttention from vllm.config import ModelConfig from vllm.distributed import divide, get_tensor_model_parallel_world_size from vllm.inputs import DecoderOnlyInputs, token_inputs @@ -29,8 +28,6 @@ resolve_visual_encoder_outputs) from vllm.sequence import SequenceData -from .utils import get_vit_attn_backend - def get_siglip_patch_grid_length(*, image_size: int, patch_size: int) -> int: # Since interpolation is applied, the image size need not be divisible @@ -291,52 +288,18 @@ def __init__( self.tp_size = get_tensor_model_parallel_world_size() self.num_heads_per_partition = divide(self.num_heads, self.tp_size) - self.attn_backend = get_vit_attn_backend(support_fa=False) - if self.attn_backend not in {_Backend.TORCH_SDPA, _Backend.XFORMERS}: - raise RuntimeError( - f"SIGLIP does not support {self.attn_backend} backend now.") + self.attn = MultiHeadAttention(self.num_heads_per_partition, + self.head_dim, self.scale) def forward( self, hidden_states: torch.Tensor, ) -> torch.Tensor: """Input shape: Batch x Time x Channel""" - batch_size, q_len, _ = hidden_states.size() - qkv_states, _ = self.qkv_proj(hidden_states) query_states, key_states, value_states = qkv_states.chunk(3, dim=-1) - query_states = query_states.view(batch_size, q_len, - self.num_heads_per_partition, - self.head_dim) - key_states = key_states.view(batch_size, q_len, - self.num_heads_per_partition, - self.head_dim) - value_states = value_states.view(batch_size, q_len, - self.num_heads_per_partition, - self.head_dim) - - if self.attn_backend == _Backend.XFORMERS: - from 
xformers import ops as xops - - out = xops.memory_efficient_attention_forward(query_states, - key_states, - value_states, - p=self.dropout, - scale=self.scale) - elif self.attn_backend == _Backend.TORCH_SDPA: - query_states, key_states, value_states = (x.transpose(1, 2) - for x in (query_states, - key_states, - value_states)) - out = F.scaled_dot_product_attention(query_states, - key_states, - value_states, - dropout_p=self.dropout, - scale=self.scale) - out = out.transpose(1, 2) - - out = out.view(batch_size, q_len, -1) + out = self.attn(query_states, key_states, value_states) attn_output, _ = self.out_proj(out) return attn_output, None diff --git a/vllm/model_executor/models/solar.py b/vllm/model_executor/models/solar.py index f58710d215056..caae0b65d7d10 100644 --- a/vllm/model_executor/models/solar.py +++ b/vllm/model_executor/models/solar.py @@ -443,10 +443,11 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, config.vocab_size, logit_scale) - self.sampler = get_sampler() else: self.lm_head = PPMissingLayer() + self.sampler = get_sampler() + self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) diff --git a/vllm/model_executor/models/starcoder2.py b/vllm/model_executor/models/starcoder2.py index 15e8f2af52cda..22189a517d313 100644 --- a/vllm/model_executor/models/starcoder2.py +++ b/vllm/model_executor/models/starcoder2.py @@ -37,7 +37,8 @@ from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler from vllm.model_executor.layers.vocab_parallel_embedding import ( DEFAULT_VOCAB_PADDING_SIZE, ParallelLMHead, VocabParallelEmbedding) -from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.model_loader.weight_utils import ( + default_weight_loader, maybe_remap_kv_scale_name) from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.sequence import IntermediateTensors @@ -345,6 +346,10 @@ def load_weights(self, weights: Iterable[Tuple[str, weight_loader(param, loaded_weight, shard_id) break else: + name = maybe_remap_kv_scale_name(name, params_dict) + if name is None: + continue + if self.config.tie_word_embeddings and "lm_head.weight" in name: continue if is_pp_missing_parameter(name, self): diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py new file mode 100644 index 0000000000000..b0324f0e871cc --- /dev/null +++ b/vllm/model_executor/models/transformers.py @@ -0,0 +1,225 @@ +# Copyright 2024 The vLLM team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
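Before reading the wrapper below, it helps to see the dispatch mechanism it builds on: `transformers` keeps a mapping from the string in `config._attn_implementation` to an attention callable, and this patch registers a "vllm" entry so every HF attention layer calls back into vLLM's paged attention. What follows is a minimal self-contained sketch of that pattern; `ATTENTION_FUNCTIONS`, `eager_attention`, and `ToyAttention` are invented stand-ins, not the real transformers or vLLM internals.

# Sketch of name-based attention dispatch (illustrative names only).
from typing import Callable, Dict, Optional

import torch

AttnFn = Callable[..., torch.Tensor]
ATTENTION_FUNCTIONS: Dict[str, AttnFn] = {}


def eager_attention(module, q: torch.Tensor, k: torch.Tensor,
                    v: torch.Tensor, mask: Optional[torch.Tensor] = None,
                    **kwargs) -> torch.Tensor:
    # Plain softmax(Q K^T / sqrt(d)) V as the default implementation.
    scores = q @ k.transpose(-2, -1) * q.shape[-1]**-0.5
    if mask is not None:
        scores = scores + mask
    return torch.softmax(scores, dim=-1) @ v


ATTENTION_FUNCTIONS["eager"] = eager_attention


class ToyAttention(torch.nn.Module):

    def __init__(self, impl: str = "eager"):
        super().__init__()
        self.impl = impl

    def forward(self, q, k, v, **kwargs):
        # The layer looks its kernel up by name instead of hardcoding it,
        # so registering ATTENTION_FUNCTIONS["vllm"] = ... reroutes every
        # layer without touching the modeling code.
        return ATTENTION_FUNCTIONS[self.impl](self, q, k, v, **kwargs)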
+ +"""Wrapper around `transformers` models""" +import re +from typing import Dict, Iterable, List, Optional, Set, Tuple, TypedDict, Union + +import torch +from torch import nn +from transformers import AutoModel +from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS + +from vllm.attention import Attention, AttentionMetadata +from vllm.config import VllmConfig +from vllm.distributed import get_tensor_model_parallel_world_size +from vllm.distributed.utils import divide +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.linear import (ColumnParallelLinear, + RowParallelLinear) +from vllm.model_executor.layers.vocab_parallel_embedding import VocabParallelEmbedding, ParallelLMHead +from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler +from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import IntermediateTensors + + +from .utils import maybe_prefix + + +class VllmKwargsForCausalLM(TypedDict, total=False): + """ + Keyword arguments for Flash Attention with Compile. + Attributes: + kv_cache + maxattn_metadata_length + """ + kv_cache: torch.Tensor + attn_metadata: AttentionMetadata + + +def vllm_flash_attention_forward( + _module, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + attention_mask: torch.Tensor, + query_length: int=None, + kv_caches: torch.Tensor=None, + attn_metadata: AttentionMetadata=None, + attention_interface=None, + **kwargs + ): + layer_idx = _module.layer_idx + hidden = query.shape[-2] + query, key, value = [x.transpose(1,2) for x in (query, key, value)] + query, key, value = [x.reshape(hidden,-1) for x in (query, key, value)] + return attention_interface(query, key, value, kv_cache=kv_caches[layer_idx],attn_metadata=attn_metadata), None + + +ALL_ATTENTION_FUNCTIONS["vllm"] = vllm_flash_attention_forward + + +# Linear Layer that is compatiable with transformers internal forward +# TODO: This is a temporary solution, we should find a better way to intergrate +class HFColumnParallelLinear(ColumnParallelLinear): + def forward(self, input: torch.Tensor) -> torch.Tensor: + return super().forward(input)[0] + +class HFRowParallelLinear(RowParallelLinear): + def forward(self, input: torch.Tensor) -> torch.Tensor: + return super().forward(input)[0] + + +def replace_tp_linear_class(orig_module: nn.Linear, style: str): + """ + In model configurations, we use a neutral type (string) to specify parallel + styles, here we use it to translate nn.Linear into vllm-style tp Linear. 
+ """ + + if not isinstance(style, str): + raise ValueError(f"Unsupported parallel style type {type(style)}, expected str") + + input_size = orig_module.in_features + output_size = orig_module.out_features + bias = orig_module.bias is not None + + if style == "colwise": + return HFColumnParallelLinear(input_size, output_size, bias) + elif style == "rowwise": + return HFRowParallelLinear(input_size, output_size, bias) + # We don't consider colwise_rep since it's used in lm_head + else: + raise ValueError(f"Unsupported parallel style value: {style}") + + +class TransformersModel(nn.Module): + embedding_padding_modules = ["lm_head"] + + def __init__( + self, + *, + vllm_config: VllmConfig, + prefix: str = "" + ) -> None: + super().__init__() + + config = vllm_config.model_config.hf_config + self.config = config + self.vocab_size = config.vocab_size + self.unpadded_vocab_size = config.vocab_size + + self.tp_size = get_tensor_model_parallel_world_size() + self.attention_interface = Attention( + divide(config.num_attention_heads, self.tp_size), + config.head_dim, + config.head_dim**-0.5, # ish, the sacling is different for every attn layer + num_kv_heads=divide(config.num_key_value_heads, self.tp_size), + cache_config=vllm_config.cache_config, + quant_config=vllm_config.quant_config, + ) + config._attn_implementation_internal="vllm" + + self.tp_plan = self.config.base_model_tp_plan + self.model = AutoModel.from_config(config) + self.tensor_parallelize(self.model) + + # TODO(Isotr0py): Find a better method to parallelize VocabEmbedding + # self.model.embed_tokens = VocabParallelEmbedding( + # self.vocab_size, + # config.hidden_size, + # org_num_embeddings=config.vocab_size, + # quant_config=None, + # ) + self.lm_head = ParallelLMHead(config.vocab_size, + config.hidden_size, + quant_config=None, + prefix=maybe_prefix( + prefix, "lm_head")) + if config.tie_word_embeddings: + self.lm_head.weight = self.model.get_input_embeddings().weight + + logit_scale = getattr(config, "logit_scale", 1.0) + self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, + config.vocab_size, + logit_scale) + self.sampler = get_sampler() + + + def tensor_parallelize(self, module: nn.Module, prefix: str =""): + for child_name, child_module in module.named_children(): + qual_name = prefix + child_name + for pattern, style in self.tp_plan.items(): + if re.match(pattern, qual_name) and isinstance(child_module, nn.Linear): + new_module = replace_tp_linear_class(child_module, style) + print(f"{qual_name}: {child_module} -> {new_module}") + setattr(module, child_name, new_module) + else: + self.tensor_parallelize(child_module, prefix=f"{qual_name}.") + + + def _autoset_attn_implementation(self, config, + use_flash_attention_2: bool = False, + torch_dtype: Optional[torch.dtype] = None, + device_map: Optional[Union[str, Dict[str, int]]] = None, + check_device_map: bool = True, + ): + config._attn_implementation = "vllm" + config._attn_implementation_autoset = True + return config + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + kv_caches: List[torch.Tensor], + attn_metadata: AttentionMetadata, + intermediate_tensors: Optional[IntermediateTensors] = None, + ) -> Union[torch.Tensor, IntermediateTensors]: + model_output = self.model( + input_ids[None,...], use_cache=False, + position_ids=positions[None,...], + kv_caches=kv_caches, attn_metadata=attn_metadata, + intermediate_tensors=intermediate_tensors, + attention_interface = self.attention_interface.forward, + return_dict=False + )[0][0,...] 
# we remove batch dimension for now + return model_output + + def compute_logits( + self, + hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[torch.Tensor]: + logits = self.logits_processor(self.lm_head, hidden_states, + sampling_metadata) + return logits + + def sample(self, logits: torch.Tensor, + sampling_metadata: SamplingMetadata) -> Optional[SamplerOutput]: + + next_tokens = self.sampler(logits, sampling_metadata) + return next_tokens + + def load_weights(self, weights: Iterable[Tuple[str, + torch.Tensor]]) -> Set[str]: + params_dict = dict(self.named_parameters()) + loaded_params: Set[str] = set() + for name, loaded_weight in weights: + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/ultravox.py b/vllm/model_executor/models/ultravox.py index b61deccde45b7..ebaa8a4c4f38a 100644 --- a/vllm/model_executor/models/ultravox.py +++ b/vllm/model_executor/models/ultravox.py @@ -3,41 +3,39 @@ import math from functools import cached_property, lru_cache -from typing import (Iterable, List, Literal, Mapping, Optional, Set, Tuple, - TypedDict, Union, cast) +from typing import (Any, Dict, Iterable, List, Literal, Mapping, Optional, Set, + Tuple, TypedDict, Union) import numpy as np import torch import torch.utils.checkpoint from torch import nn from torch.nn import functional as F +from transformers import BatchFeature from transformers.models.whisper import WhisperFeatureExtractor from transformers.models.whisper.modeling_whisper import WhisperEncoder from vllm.attention import AttentionMetadata from vllm.config import VllmConfig -from vllm.inputs import (INPUT_REGISTRY, DecoderOnlyInputs, DummyData, - InputContext, token_inputs) +from vllm.inputs import InputContext from vllm.model_executor.layers.activation import SiluAndMul, get_act_fn from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler from vllm.model_executor.model_loader.loader import DefaultModelLoader from vllm.model_executor.sampling_metadata import SamplingMetadata -from vllm.multimodal import (MULTIMODAL_REGISTRY, MultiModalKwargs, - NestedTensors) -from vllm.multimodal.utils import (cached_get_tokenizer, - consecutive_placeholder_ranges, - repeat_and_pad_placeholder_tokens) -from vllm.sequence import IntermediateTensors, SequenceData +from vllm.multimodal import MULTIMODAL_REGISTRY, NestedTensors +from vllm.multimodal.processing import (BaseMultiModalProcessor, + MultiModalDataDict, + MultiModalDataItems, ProcessorInputs, + PromptReplacement) +from vllm.sequence import IntermediateTensors from vllm.transformers_utils.configs.ultravox import UltravoxConfig -from vllm.utils import is_list_of from .interfaces import SupportsMultiModal, SupportsPP from .utils import (AutoWeightsLoader, WeightsMapper, flatten_bn, init_vllm_registered_model, maybe_prefix, merge_multimodal_embeddings_from_map) -_AUDIO_PLACEHOLDER_TOKEN = 128002 _AUDIO_TOKENS_PER_SECOND = 6.25 @@ -72,64 +70,18 @@ def get_ultravox_max_audio_tokens(ctx: InputContext): return math.ceil(feature_extractor.chunk_length * _AUDIO_TOKENS_PER_SECOND) -def dummy_seq_data_for_ultravox( - ctx: InputContext, - seq_len: int, - audio_count: int, -): - audio_length = min(get_ultravox_max_audio_tokens(ctx), - seq_len // audio_count) +class UltravoxMultiModalProcessor(BaseMultiModalProcessor): - return 
SequenceData.from_prompt_token_counts( - (_AUDIO_PLACEHOLDER_TOKEN, audio_length * audio_count), - (0, seq_len - audio_length * audio_count)), { - "audio": - consecutive_placeholder_ranges(num_items=audio_count, - item_size=audio_length) - } - - -def dummy_audio_for_ultravox( - ctx: InputContext, - audio_count: int, -): - feature_extractor = whisper_feature_extractor(ctx) - audio_and_sr = (np.array([0.0] * feature_extractor.chunk_length), 1) - return {"audio": [audio_and_sr] * audio_count} - - -def dummy_data_for_ultravox( - ctx: InputContext, - seq_len: int, - mm_counts: Mapping[str, int], -): - audio_count = mm_counts["audio"] - seq_data, ranges = dummy_seq_data_for_ultravox(ctx, seq_len, audio_count) - mm_dict = dummy_audio_for_ultravox(ctx, audio_count) - - return DummyData(seq_data, mm_dict, ranges) - - -def input_mapper_for_ultravox(ctx: InputContext, data: object): - if not isinstance(data, list): - data = [data] - - if len(data) == 0: - return MultiModalKwargs() - - # If the audio inputs are embeddings, no need for preprocessing - if is_list_of(data, torch.Tensor, check="all"): - return MultiModalKwargs({"audio_embeds": data}) - - audio_features = [] - for audio_input in data: - if not isinstance(audio_input, tuple): - raise NotImplementedError( - f"Unsupported data type: {type(audio_input)}") - - (audio, sr) = cast(Tuple[np.ndarray, Union[float, int]], audio_input) - feature_extractor = whisper_feature_extractor(ctx) + def _get_feature_extractor(self) -> WhisperFeatureExtractor: + return self._get_hf_processor().audio_processor.feature_extractor + def _resample_audio( + self, + audio: np.ndarray, + sr: int, + ) -> Dict[str, Union[np.ndarray, int]]: + # resample audio to the model's sampling rate + feature_extractor = self._get_feature_extractor() if sr != feature_extractor.sampling_rate: try: import librosa @@ -140,78 +92,92 @@ def input_mapper_for_ultravox(ctx: InputContext, data: object): orig_sr=sr, target_sr=feature_extractor.sampling_rate) sr = feature_extractor.sampling_rate + return {"audio": audio, "sampling_rate": sr} - minimum_audio_length = feature_extractor.n_fft // 2 + 1 - if len(audio) < minimum_audio_length: - # Not enough audio; pad it. - audio = np.pad(audio, (0, minimum_audio_length - len(audio))) - - single_audio_features = feature_extractor( - audio, sampling_rate=sr, padding="longest", - return_tensors="pt")["input_features"] - - # Remove the batch dimension because we're wrapping it in a list. 
- audio_features.append(single_audio_features.squeeze(0)) - - return MultiModalKwargs({"audio_features": audio_features}) - + def _apply_hf_processor( + self, + prompt: str, + mm_data: MultiModalDataDict, + mm_processor_kwargs: Mapping[str, object], + ) -> BatchFeature: + if not mm_data or not mm_data.get("audio", None): + return super()._apply_hf_processor(prompt, mm_data, + mm_processor_kwargs) + + audio_data = mm_data["audio"] + if not isinstance(audio_data, list): + audio_data = [audio_data] + + # Ultravox processor doesn't support multiple inputs, + # therefore we need to input text and audio one by one + tokenizer = self._get_tokenizer() + audio_features, audio_token_len = [], [] + processed_inputs = {} + for audio, sr in audio_data: + data = self._resample_audio(audio, sr) + processed_inputs = super()._apply_hf_processor( + prompt, data, mm_processor_kwargs) + prompt = tokenizer.decode(processed_inputs["input_ids"][0], + skip_special_tokens=False) + audio_features.append( + processed_inputs.pop("audio_values").squeeze(0)) + audio_token_len.append( + processed_inputs.pop("audio_token_len").item()) + + return dict( + **processed_inputs, + audio_features=audio_features, + audio_token_len=audio_token_len, + ) + + def _get_processor_data( + self, + mm_data: MultiModalDataDict, + ) -> Tuple[Dict[str, Any], Dict[str, Any]]: + # Ultravox uses "audio" instead of "audios" as calling keyword + processor_data, passthrough_data = super()._get_processor_data(mm_data) + if "audios" in processor_data: + processor_data["audio"] = processor_data.pop("audios") + return processor_data, passthrough_data + + def _get_prompt_replacements( + self, + mm_items: MultiModalDataItems, + hf_inputs: BatchFeature, + mm_processor_kwargs: Mapping[str, object], + ) -> list[PromptReplacement]: + hf_processor = self._get_hf_processor() + placeholder = hf_processor.audio_token_replacement + + def get_replacement_ultravox(item_idx: int): + audio_token_len = hf_inputs["audio_token_len"][item_idx] + return placeholder * audio_token_len + + return [ + PromptReplacement( + modality="audio", + target="<|audio|>", + replacement=get_replacement_ultravox, + ) + ] -def input_processor_for_ultravox(ctx: InputContext, inputs: DecoderOnlyInputs): - multi_modal_data = inputs.get("multi_modal_data") - if multi_modal_data is None or "audio" not in multi_modal_data: - return inputs + def _get_dummy_mm_inputs( + self, + mm_counts: Mapping[str, int], + ) -> ProcessorInputs: + feature_extractor = self._get_feature_extractor() + sampling_rate = feature_extractor.sampling_rate + audio_len = feature_extractor.chunk_length * sampling_rate - if "multi_modal_placeholders" in inputs and "audio" in inputs[ - "multi_modal_placeholders"]: - # The inputs already have placeholders. - return inputs + audio_count = mm_counts["audio"] + audio = np.zeros(audio_len) + data = {"audio": [(audio, sampling_rate)] * audio_count} - feature_extractor = whisper_feature_extractor(ctx) - audios = multi_modal_data["audio"] - if not isinstance(audios, list): - audios = [audios] - - audio_token_counts = [] - for audio in audios: - if isinstance(audio, torch.Tensor): - audio_num_tokens = audio.shape[1] - audio_token_counts.append(audio_num_tokens) - else: - audio_data, sample_rate = audio - audio_length = audio_data.shape[0] - if sample_rate != feature_extractor.sampling_rate: - # Account for resampling. 
- adjustment = feature_extractor.sampling_rate / sample_rate - audio_length = math.ceil(adjustment * audio_length) - - feature_extractor_output_length = math.ceil( - (audio_length - (feature_extractor.hop_length - 1)) / - feature_extractor.hop_length) - - uv_config = ctx.get_hf_config(UltravoxConfig) - audio_num_tokens = min( - max( - 1, - math.ceil(feature_extractor_output_length / - (uv_config.stack_factor * 2))), - get_ultravox_max_audio_tokens(ctx)) - audio_token_counts.append(audio_num_tokens) - - tokenizer = cached_get_tokenizer(ctx.model_config.tokenizer) - - new_prompt, new_token_ids, ranges = repeat_and_pad_placeholder_tokens( - tokenizer, - inputs.get("prompt"), - inputs["prompt_token_ids"], - placeholder_token_id=_AUDIO_PLACEHOLDER_TOKEN, - repeat_count=audio_token_counts, - ) - - # NOTE: Create a defensive copy of the original inputs - return token_inputs(prompt_token_ids=new_token_ids, - prompt=new_prompt, - multi_modal_data=multi_modal_data, - multi_modal_placeholders={"audio": ranges}) + return ProcessorInputs( + prompt_text="<|audio|>" * audio_count, + mm_data=data, + mm_processor_kwargs={}, + ) class StackAudioFrames(nn.Module): @@ -332,11 +298,9 @@ def forward( return hidden_states -@MULTIMODAL_REGISTRY.register_input_mapper("audio", input_mapper_for_ultravox) @MULTIMODAL_REGISTRY.register_max_multimodal_tokens( "audio", get_ultravox_max_audio_tokens) -@INPUT_REGISTRY.register_dummy_data(dummy_data_for_ultravox) -@INPUT_REGISTRY.register_input_processor(input_processor_for_ultravox) +@MULTIMODAL_REGISTRY.register_processor(UltravoxMultiModalProcessor) class UltravoxModel(nn.Module, SupportsMultiModal, SupportsPP): def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): @@ -360,9 +324,10 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): )) self.multi_modal_projector = UltravoxProjector(config) self.language_model = init_vllm_registered_model( - config.text_config, vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "language_model")) + hf_config=config.text_config, + prefix=maybe_prefix(prefix, "language_model"), + ) if config.text_model_id is not None: # this prefix is not for initialization, but for loading weights # note the trailing dot diff --git a/vllm/model_executor/models/utils.py b/vllm/model_executor/models/utils.py index 4c13cbc953273..269b66806adf4 100644 --- a/vllm/model_executor/models/utils.py +++ b/vllm/model_executor/models/utils.py @@ -1,7 +1,7 @@ import itertools from dataclasses import dataclass, field -from typing import (Any, Callable, Dict, Iterable, List, Literal, Mapping, - Optional, Protocol, Set, Tuple, Union, overload) +from typing import (Callable, Dict, Iterable, List, Literal, Mapping, Optional, + Protocol, Set, Tuple, Union, overload) import torch import torch.nn as nn @@ -17,7 +17,7 @@ from vllm.multimodal import MultiModalPlaceholderMap, NestedTensors from vllm.platforms import _Backend, current_platform from vllm.sequence import IntermediateTensors -from vllm.utils import is_pin_memory_available +from vllm.utils import is_pin_memory_available, print_warning_once logger = init_logger(__name__) @@ -173,8 +173,15 @@ def _load_module( module_load_weights = getattr(module, "load_weights", None) if callable(module_load_weights): loaded_params = module_load_weights(weights) - yield from map(lambda x: self._get_qualname(base_prefix, x), - loaded_params) + if loaded_params is None: + logger.warning( + "Unable to collect loaded parameters " + "for module %s", module) + else: + yield from map( + lambda x: 
self._get_qualname(base_prefix, x), + loaded_params, + ) child_modules = dict(module.named_children()) child_params = dict(module.named_parameters(recurse=False)) @@ -232,17 +239,27 @@ def load_weights( def init_vllm_registered_model( - hf_config: PretrainedConfig, vllm_config: VllmConfig, + *, prefix: str = "", + hf_config: Optional[PretrainedConfig] = None, + architectures: Optional[list[str]] = None, ) -> nn.Module: """ Helper function to initialize an inner model registered to vLLM, based on the arguments passed to the outer vLLM model. """ from vllm.model_executor.model_loader.loader import _initialize_model - vllm_config = vllm_config.with_hf_config(hf_config) - return _initialize_model(vllm_config, prefix) + + if hf_config is None and architectures is not None: + # So that the architectures field is overridden + hf_config = vllm_config.model_config.hf_config + + if hf_config is not None: + vllm_config = vllm_config.with_hf_config(hf_config, + architectures=architectures) + + return _initialize_model(vllm_config=vllm_config, prefix=prefix) @overload @@ -392,16 +409,42 @@ def merge_multimodal_embeddings( input_ids: torch.Tensor, inputs_embeds: torch.Tensor, multimodal_embeddings: NestedTensors, - placeholder_token_id: int, + placeholder_token_id: Union[int, List[int]], ) -> torch.Tensor: """ Merge ``multimodal_embeddings`` into ``inputs_embeds`` by overwriting the positions in ``inputs_embeds`` corresponding to placeholder tokens in ``input_ids``. + + ``placeholder_token_id`` can be a list of token ids (e.g, token ids + of img_start, img_break, and img_end tokens) when needed: This means + the order of these tokens in the ``input_ids`` MUST MATCH the order of + their embeddings in ``multimodal_embeddings`` since we need to + slice-merge instead of individually scattering. + + For example, if input_ids is "TTTTTSIIIBIIIBIIIETTT", where + - T is text token + - S is image start token + - I is image embedding token + - B is image break token + - E is image end token. + + Then the image embeddings (that correspond to I's) from vision encoder + must be padded with embeddings of S, B, and E in the same order of + input_ids for a correct embedding merge. Note: This updates ``inputs_embeds`` in place. """ + if isinstance(placeholder_token_id, list): + placeholder_token_id = torch.tensor(placeholder_token_id, + device=input_ids.device) + return _merge_multimodal_embeddings( + inputs_embeds, + torch.isin(input_ids, placeholder_token_id), + multimodal_embeddings, + ) + return _merge_multimodal_embeddings( inputs_embeds, (input_ids == placeholder_token_id), @@ -560,30 +603,6 @@ def make_empty_intermediate_tensors( return make_empty_intermediate_tensors -class LLMWrapper(nn.Module): - """ - To align with the key names of LoRA trained with PEFT, we need to add an - additional layer to the llm's implementation. - """ - - def __init__(self, llm: nn.Module, name: str) -> None: - super().__init__() - self.model_name = name - setattr(self, name, llm) - - def __getattr__(self, key: str): - llm = super().__getattr__(self.model_name) - if key == self.model_name: - return llm - - return getattr(llm, key) - - # We need to explicitly override this - def __call__(self, *args: Any, **kwargs: Any) -> Any: - llm = super().__getattr__(self.model_name) - return llm(*args, **kwargs) - - def get_vit_attn_backend(support_fa: bool = False) -> _Backend: """ Get the available attention backend for Vision Transformer. 
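The reworked `merge_multimodal_embeddings` above collapses the list-of-ids case into a single boolean mask over `input_ids` followed by one slice assignment. Below is a small self-contained sketch of that slice-merge, mirroring the "TTTTTSIIIBIIIBIIIETTT" docstring example; all token ids, sizes, and values here are invented for illustration.

# Toy illustration of the list-of-placeholder-ids merge path.
import torch

T, S, I, B, E = 0, 1, 2, 3, 4  # text, img_start, img_embed, img_break, img_end
input_ids = torch.tensor([T, T, S, I, I, B, I, I, E, T])
inputs_embeds = torch.zeros(input_ids.numel(), 8)  # (seq_len, hidden_size)

# The vision embeddings must already contain rows for S/B/E in input_ids
# order, because every matching position is overwritten in one slice.
placeholder_ids = torch.tensor([S, I, B, E])
is_mm = torch.isin(input_ids, placeholder_ids)
multimodal_embeddings = torch.randn(int(is_mm.sum()), 8)

inputs_embeds[is_mm] = multimodal_embeddings  # in-place, as in the helper
assert int(is_mm.sum()) == 7  # S I I B I I E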
@@ -602,7 +621,7 @@ def get_vit_attn_backend(support_fa: bool = False) -> _Backend: if is_flash_attn_2_available(): selected_backend = _Backend.FLASH_ATTN else: - logger.warning( + print_warning_once( "Current `vllm-flash-attn` has a bug inside vision module, " "so we use xformers backend instead. You can run " "`pip install flash-attn` to use flash-attention backend.") diff --git a/vllm/model_executor/models/xverse.py b/vllm/model_executor/models/xverse.py deleted file mode 100644 index 25a0d474e2863..0000000000000 --- a/vllm/model_executor/models/xverse.py +++ /dev/null @@ -1,423 +0,0 @@ -# Adapted from -# https://huggingface.co/xverse/XVERSE-7B/blob/main/modeling_xverse.py -# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. -# -# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX -# and OPT implementations in this library. It has been modified from its -# original forms to accommodate minor architectural differences compared -# to GPT-NeoX and OPT used by the Meta AI team that trained the model. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Inference-only Xverse model compatible with HuggingFace weights.""" -from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union - -import torch -from torch import nn -from transformers import PretrainedConfig - -from vllm.attention import Attention, AttentionMetadata -from vllm.compilation.decorators import support_torch_compile -from vllm.config import CacheConfig, VllmConfig -from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size -from vllm.model_executor.layers.activation import SiluAndMul -from vllm.model_executor.layers.layernorm import RMSNorm -from vllm.model_executor.layers.linear import (MergedColumnParallelLinear, - QKVParallelLinear, - RowParallelLinear) -from vllm.model_executor.layers.logits_processor import LogitsProcessor -from vllm.model_executor.layers.quantization import QuantizationConfig -from vllm.model_executor.layers.rotary_embedding import get_rope -from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler -from vllm.model_executor.layers.vocab_parallel_embedding import ( - ParallelLMHead, VocabParallelEmbedding) -from vllm.model_executor.model_loader.weight_utils import default_weight_loader -from vllm.model_executor.sampling_metadata import SamplingMetadata -from vllm.sequence import IntermediateTensors - -from .interfaces import SupportsLoRA, SupportsPP -from .utils import (is_pp_missing_parameter, - make_empty_intermediate_tensors_factory, make_layers, - maybe_prefix) - - -class XverseMLP(nn.Module): - - def __init__( - self, - hidden_size: int, - intermediate_size: int, - hidden_act: str, - quant_config: Optional[QuantizationConfig] = None, - ) -> None: - super().__init__() - self.gate_up_proj = MergedColumnParallelLinear( - hidden_size, [intermediate_size] * 2, - bias=False, - quant_config=quant_config) - self.down_proj = RowParallelLinear(intermediate_size, - hidden_size, - bias=False, - quant_config=quant_config) - 
if hidden_act != "silu": - raise ValueError(f"Unsupported activation: {hidden_act}. " - "Only silu is supported for now.") - self.act_fn = SiluAndMul() - - def forward(self, x): - gate, _ = self.gate_up_proj(x) - x = self.act_fn(gate) - x, _ = self.down_proj(x) - return x - - -class XverseAttention(nn.Module): - - def __init__( - self, - hidden_size: int, - num_heads: int, - num_kv_heads: int, - rope_theta: float = 10000, - rope_scaling: Optional[Dict[str, Any]] = None, - max_position_embeddings: int = 8192, - quant_config: Optional[QuantizationConfig] = None, - bias: bool = False, - cache_config: Optional[CacheConfig] = None, - prefix: str = "", - ) -> None: - super().__init__() - self.hidden_size = hidden_size - tp_size = get_tensor_model_parallel_world_size() - self.total_num_heads = num_heads - assert self.total_num_heads % tp_size == 0 - self.num_heads = self.total_num_heads // tp_size - self.total_num_kv_heads = num_kv_heads - # partition the KV heads across multiple tensor parallel GPUs. - assert self.total_num_kv_heads % tp_size == 0 - self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size) - self.head_dim = hidden_size // self.total_num_heads - self.q_size = self.num_heads * self.head_dim - self.kv_size = self.num_kv_heads * self.head_dim - self.scaling = self.head_dim**-0.5 - self.rope_theta = rope_theta - self.max_position_embeddings = max_position_embeddings - - self.qkv_proj = QKVParallelLinear( - hidden_size, - self.head_dim, - self.total_num_heads, - self.total_num_kv_heads, - bias=bias, - quant_config=quant_config, - ) - self.o_proj = RowParallelLinear( - self.total_num_heads * self.head_dim, - hidden_size, - bias=bias, - quant_config=quant_config, - ) - - self.rotary_emb = get_rope( - self.head_dim, - rotary_dim=self.head_dim, - max_position=max_position_embeddings, - base=rope_theta, - rope_scaling=rope_scaling, - ) - self.attn = Attention(self.num_heads, - self.head_dim, - self.scaling, - num_kv_heads=self.num_kv_heads, - cache_config=cache_config, - quant_config=quant_config, - prefix=f"{prefix}.attn") - - def forward( - self, - positions: torch.Tensor, - hidden_states: torch.Tensor, - kv_cache: torch.Tensor, - attn_metadata: AttentionMetadata, - ) -> torch.Tensor: - qkv, _ = self.qkv_proj(hidden_states) - q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) - q, k = self.rotary_emb(positions, q, k) - attn_output = self.attn(q, k, v, kv_cache, attn_metadata) - output, _ = self.o_proj(attn_output) - return output - - -class XverseDecoderLayer(nn.Module): - - def __init__( - self, - config: PretrainedConfig, - cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None, - prefix: str = "", - ) -> None: - super().__init__() - self.hidden_size = config.hidden_size - rope_theta = getattr(config, "rope_theta", 10000) - rope_scaling = getattr(config, "rope_scaling", None) - max_position_embeddings = getattr(config, "max_position_embeddings", - 8192) - self.self_attn = XverseAttention( - hidden_size=self.hidden_size, - num_heads=config.num_attention_heads, - num_kv_heads=getattr(config, "num_key_value_heads", - config.num_attention_heads), - rope_theta=rope_theta, - rope_scaling=rope_scaling, - max_position_embeddings=max_position_embeddings, - quant_config=quant_config, - bias=getattr(config, "bias", False), - cache_config=cache_config, - prefix=f"{prefix}.self_attn", - ) - self.mlp = XverseMLP( - hidden_size=self.hidden_size, - intermediate_size=config.intermediate_size, - hidden_act=config.hidden_act, - 
quant_config=quant_config, - ) - self.input_layernorm = RMSNorm(config.hidden_size, - eps=config.rms_norm_eps) - self.post_attention_layernorm = RMSNorm(config.hidden_size, - eps=config.rms_norm_eps) - - def forward( - self, - positions: torch.Tensor, - hidden_states: torch.Tensor, - kv_cache: torch.Tensor, - attn_metadata: AttentionMetadata, - residual: Optional[torch.Tensor], - ) -> Tuple[torch.Tensor, torch.Tensor]: - # Self Attention - if residual is None: - residual = hidden_states - hidden_states = self.input_layernorm(hidden_states) - else: - hidden_states, residual = self.input_layernorm( - hidden_states, residual) - hidden_states = self.self_attn( - positions=positions, - hidden_states=hidden_states, - kv_cache=kv_cache, - attn_metadata=attn_metadata, - ) - - # Fully Connected - hidden_states, residual = self.post_attention_layernorm( - hidden_states, residual) - hidden_states = self.mlp(hidden_states) - return hidden_states, residual - - -@support_torch_compile -class XverseModel(nn.Module): - - def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): - super().__init__() - config = vllm_config.model_config.hf_config - cache_config = vllm_config.cache_config - quant_config = vllm_config.quant_config - lora_config = vllm_config.lora_config - self.config = config - self.padding_idx = config.pad_token_id - lora_vocab = (lora_config.lora_extra_vocab_size * - (lora_config.max_loras or 1)) if lora_config else 0 - self.vocab_size = config.vocab_size + lora_vocab - self.org_vocab_size = config.vocab_size - self.embed_tokens = VocabParallelEmbedding( - self.vocab_size, - config.hidden_size, - org_num_embeddings=config.vocab_size, - ) - self.start_layer, self.end_layer, self.layers = make_layers( - config.num_hidden_layers, - lambda prefix: XverseDecoderLayer( - config, cache_config, quant_config, prefix=prefix), - prefix=f"{prefix}.layers", - ) - self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) - self.make_empty_intermediate_tensors = ( - make_empty_intermediate_tensors_factory( - ["hidden_states", "residual"], config.hidden_size)) - - def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: - return self.embed_tokens(input_ids) - - def forward( - self, - input_ids: torch.Tensor, - positions: torch.Tensor, - kv_caches: List[torch.Tensor], - attn_metadata: AttentionMetadata, - intermediate_tensors: Optional[IntermediateTensors], - inputs_embeds: Optional[torch.Tensor] = None, - ) -> Union[torch.Tensor, IntermediateTensors]: - if get_pp_group().is_first_rank: - if inputs_embeds is not None: - hidden_states = inputs_embeds - else: - hidden_states = self.get_input_embeddings(input_ids) - residual = None - else: - hidden_states = intermediate_tensors["hidden_states"] - residual = intermediate_tensors["residual"] - for i in range(self.start_layer, self.end_layer): - layer = self.layers[i] - hidden_states, residual = layer( - positions, - hidden_states, - kv_caches[i - self.start_layer], - attn_metadata, - residual, - ) - if not get_pp_group().is_last_rank: - return IntermediateTensors({ - "hidden_states": hidden_states, - "residual": residual - }) - hidden_states, _ = self.norm(hidden_states, residual) - return hidden_states - - -class XverseForCausalLM(nn.Module, SupportsLoRA, SupportsPP): - packed_modules_mapping = { - "qkv_proj": [ - "q_proj", - "k_proj", - "v_proj", - ], - "gate_up_proj": [ - "gate_proj", - "up_proj", - ], - } - - # LoRA specific attributes - supported_lora_modules = [ - "qkv_proj", - "o_proj", - "gate_up_proj", - "down_proj", - 
"embed_tokens", - "lm_head", - ] - embedding_modules = { - "embed_tokens": "input_embeddings", - "lm_head": "output_embeddings", - } - embedding_padding_modules = ["lm_head"] - - def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): - super().__init__() - - config = vllm_config.model_config.hf_config - quant_config = vllm_config.quant_config - lora_config = vllm_config.lora_config - - self.config = config - self.lora_config = lora_config - - self.quant_config = quant_config - self.model = XverseModel(vllm_config=vllm_config, - prefix=maybe_prefix(prefix, "model")) - self.lm_head = ParallelLMHead(config.vocab_size, - config.hidden_size, - quant_config=quant_config) - if self.config.tie_word_embeddings: - self.lm_head.weight = self.model.embed_tokens.weight - self.logits_processor = LogitsProcessor(config.vocab_size) - self.sampler = get_sampler() - self.make_empty_intermediate_tensors = ( - self.model.make_empty_intermediate_tensors) - - def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: - return self.model.get_input_embeddings(input_ids) - - def forward( - self, - input_ids: torch.Tensor, - positions: torch.Tensor, - kv_caches: List[torch.Tensor], - attn_metadata: AttentionMetadata, - intermediate_tensors: Optional[IntermediateTensors] = None, - inputs_embeds: Optional[torch.Tensor] = None, - ) -> Union[torch.Tensor, IntermediateTensors]: - hidden_states = self.model(input_ids, positions, kv_caches, - attn_metadata, intermediate_tensors, - inputs_embeds) - return hidden_states - - def compute_logits( - self, - hidden_states: torch.Tensor, - sampling_metadata: SamplingMetadata, - ) -> Optional[torch.Tensor]: - logits = self.logits_processor(self.lm_head, hidden_states, - sampling_metadata) - return logits - - def sample( - self, - logits: torch.Tensor, - sampling_metadata: SamplingMetadata, - ) -> Optional[SamplerOutput]: - next_tokens = self.sampler(logits, sampling_metadata) - return next_tokens - - def load_weights(self, weights: Iterable[Tuple[str, - torch.Tensor]]) -> Set[str]: - stacked_params_mapping = [ - ("qkv_proj", "q_proj", "q"), - ("qkv_proj", "k_proj", "k"), - ("qkv_proj", "v_proj", "v"), - ("gate_up_proj", "gate_proj", 0), - ("gate_up_proj", "up_proj", 1), - ] - params_dict = dict(self.named_parameters()) - loaded_params: Set[str] = set() - for name, loaded_weight in weights: - if ("rotary_emb.inv_freq" in name - or "rotary_emb.cos_cached" in name - or "rotary_emb.sin_cached" in name): - continue - for (param_name, weight_name, shard_id) in stacked_params_mapping: - if weight_name not in name: - continue - name = name.replace(weight_name, param_name) - # Skip loading extra bias for GPTQ models. - if name.endswith(".bias") and name not in params_dict: - continue - if is_pp_missing_parameter(name, self): - continue - param = params_dict[name] - weight_loader = param.weight_loader - weight_loader(param, loaded_weight, shard_id) - break - else: - # Skip loading extra bias for GPTQ models. 
- if name.endswith(".bias") and name not in params_dict: - continue - if is_pp_missing_parameter(name, self): - continue - param = params_dict[name] - weight_loader = getattr(param, "weight_loader", - default_weight_loader) - weight_loader(param, loaded_weight) - loaded_params.add(name) - return loaded_params diff --git a/vllm/model_executor/sampling_metadata.py b/vllm/model_executor/sampling_metadata.py index 84f35f75a0c32..1df8f84ed4093 100644 --- a/vllm/model_executor/sampling_metadata.py +++ b/vllm/model_executor/sampling_metadata.py @@ -454,6 +454,7 @@ def from_sampling_metadata( if do_penalties: for seq_group in sampling_metadata.seq_groups: seq_ids = seq_group.seq_ids + sampling_params = seq_group.sampling_params if (seq_group.is_prompt and sampling_params.prompt_logprobs is not None): prefill_len = len(seq_group.prompt_logprob_indices) diff --git a/vllm/multimodal/__init__.py b/vllm/multimodal/__init__.py index 03a5f3a91f7a1..928c31a2f2843 100644 --- a/vllm/multimodal/__init__.py +++ b/vllm/multimodal/__init__.py @@ -27,18 +27,3 @@ "MULTIMODAL_REGISTRY", "MultiModalRegistry", ] - - -def __getattr__(name: str): - import warnings - - if name == "MultiModalInputs": - msg = ("MultiModalInputs has been renamed to MultiModalKwargs. " - "The original name will take another meaning in an upcoming " - "version.") - - warnings.warn(DeprecationWarning(msg), stacklevel=2) - - return MultiModalKwargs - - raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/vllm/multimodal/base.py b/vllm/multimodal/base.py index 6eec660e42ac4..fe77a4635f7d8 100644 --- a/vllm/multimodal/base.py +++ b/vllm/multimodal/base.py @@ -7,7 +7,7 @@ from vllm.inputs import InputContext from vllm.logger import init_logger -from vllm.utils import (get_allowed_kwarg_only_overrides, +from vllm.utils import (ClassRegistry, get_allowed_kwarg_only_overrides, resolve_mm_processor_kwargs) if TYPE_CHECKING: @@ -54,8 +54,8 @@ class MultiModalPlugin(ABC): """ def __init__(self) -> None: - self._input_mappers: Dict[Type[nn.Module], MultiModalInputMapper] = {} - self._max_mm_tokens: Dict[Type[nn.Module], MultiModalTokensCalc] = {} + self._input_mappers = ClassRegistry[nn.Module, MultiModalInputMapper]() + self._max_mm_tokens = ClassRegistry[nn.Module, MultiModalTokensCalc]() @abstractmethod def get_data_key(self) -> str: @@ -99,7 +99,7 @@ def register_input_mapper( """ def wrapper(model_cls: N) -> N: - if model_cls in self._input_mappers: + if self._input_mappers.contains(model_cls, strict=True): logger.warning( "Model class %s already has an input mapper " "registered to %s. It is overwritten by the new one.", @@ -194,7 +194,7 @@ def register_max_multimodal_tokens( """ def wrapper(model_cls: N) -> N: - if model_cls in self._max_mm_tokens: + if self._max_mm_tokens.contains(model_cls, strict=True): logger.warning( "Model class %s already calculates maximum number of " "tokens in %s. 
It is overwritten by the new one.", @@ -226,16 +226,16 @@ def get_max_multimodal_tokens(self, model_config: "ModelConfig") -> int: """ # Avoid circular import from vllm.model_executor.model_loader import get_model_architecture + from vllm.model_executor.models import supports_multimodal model_cls, _ = get_model_architecture(model_config) - if model_cls not in self._input_mappers: + if not supports_multimodal(model_cls): return 0 max_mm_tokens = self._max_mm_tokens.get(model_cls) if max_mm_tokens is None: - raise KeyError(f"No maximum number of multi-modal tokens is given " - f"for model class {model_cls.__name__} in {self}.") + return 0 if callable(max_mm_tokens): mm_processor_kwargs = get_allowed_kwarg_only_overrides( @@ -326,26 +326,47 @@ def from_seq_group( src_ranges = [] dest_ranges = [] """ - if (not seq_group.multi_modal_data - or not seq_group.multi_modal_placeholders): - return seq_group.multi_modal_data, {} + seq_mm_data = seq_group.multi_modal_data + seq_mm_placeholders = seq_group.multi_modal_placeholders + + if not seq_mm_data or not seq_mm_placeholders: + return seq_mm_data, {} + + # For merged processor, we directly use mm_kwargs as mm_data + if isinstance(seq_mm_data, MultiModalKwargs): + placeholder_maps = dict[str, MultiModalPlaceholderMap]() + + for modality, placeholders in seq_mm_placeholders.items(): + placeholder_map = MultiModalPlaceholderMap() + + if positions: + placeholder_map.append_items_from_seq_group( + positions, + # Dummy, since we don't care about intersecting items + [None] * len(placeholders), + placeholders, + ) + + placeholder_maps[modality] = placeholder_map + + return seq_mm_data, placeholder_maps - mm_data = {**seq_group.multi_modal_data} - placeholder_maps: Dict[str, MultiModalPlaceholderMap] = defaultdict( + mm_data = {**seq_mm_data} + placeholder_maps = defaultdict[str, MultiModalPlaceholderMap]( MultiModalPlaceholderMap) - for ( - modality, - placeholders, - ) in seq_group.multi_modal_placeholders.items(): + for modality, placeholders in seq_mm_placeholders.items(): mm_items = mm_data.pop(modality) if not isinstance(mm_items, list): mm_items = [mm_items] if positions: - intersecting_items = placeholder_maps[ - modality].append_items_from_seq_group( - positions, mm_items, placeholders) + intersecting_items = placeholder_maps[modality] \ + .append_items_from_seq_group( + positions, + mm_items, + placeholders, + ) if intersecting_items: mm_data[modality] = intersecting_items @@ -433,18 +454,3 @@ def index_map(self) -> "IndexMap": return MultiModalPlaceholderMap.IndexMap(src=src_indices, dest=dest_indices) - - -def __getattr__(name: str): - import warnings - - if name == "MultiModalInputs": - msg = ("MultiModalInputs has been renamed to MultiModalKwargs. " - "The original name will take another meaning in an upcoming " - "version.") - - warnings.warn(DeprecationWarning(msg), stacklevel=2) - - return MultiModalKwargs - - raise AttributeError(f"module {__name__!r} has no attribute {name!r}") diff --git a/vllm/multimodal/inputs.py b/vllm/multimodal/inputs.py index 640c7c04b8817..c00943a5f26d9 100644 --- a/vllm/multimodal/inputs.py +++ b/vllm/multimodal/inputs.py @@ -96,7 +96,8 @@ class PlaceholderRange(TypedDict): """The length of the placeholder.""" -NestedTensors = Union[List["NestedTensors"], List[torch.Tensor], torch.Tensor] +NestedTensors = Union[List["NestedTensors"], List[torch.Tensor], torch.Tensor, + Tuple[torch.Tensor, ...]] """ Uses a list instead of a tensor if the dimensions of each element do not match. 
""" @@ -214,6 +215,9 @@ class MultiModalInputsV2(TypedDict): mm_kwargs: MultiModalKwargs """Keyword arguments to be directly passed to the model after batching.""" + mm_hashes: NotRequired[List[str]] + """The hashes of the multi-modal data.""" + mm_placeholders: MultiModalPlaceholderDict """ For each modality, information about the placeholder tokens in diff --git a/vllm/multimodal/processing.py b/vllm/multimodal/processing.py index 28c8dda581982..339e193eefe20 100644 --- a/vllm/multimodal/processing.py +++ b/vllm/multimodal/processing.py @@ -1,16 +1,19 @@ import re from abc import ABC, abstractmethod +from collections import UserDict from collections.abc import Callable, ItemsView, Iterable, Mapping, Sequence -from dataclasses import dataclass +from dataclasses import dataclass, field from functools import lru_cache -from itertools import groupby -from typing import Any, Generic, NamedTuple, Optional, Protocol, TypeVar, Union +from typing import Any, NamedTuple, Optional, Protocol, TypeVar, Union import numpy as np -from transformers import BatchFeature -from typing_extensions import TypeAlias, TypedDict +import torch +from PIL.Image import Image +from transformers import BatchFeature, ProcessorMixin +from typing_extensions import assert_never -from vllm.inputs import InputProcessingContext +from vllm.inputs import DummyData, InputProcessingContext +from vllm.logger import init_logger from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer from vllm.utils import flatten_2d_lists, full_groupby, is_list_of @@ -18,98 +21,39 @@ MultiModalInputsV2, MultiModalKwargs, PlaceholderRange, VideoItem) +logger = init_logger(__name__) -def bind_prompt_sequence( - seq: Union[str, list[int]], - tokenizer: AnyTokenizer, -) -> "_BoundPromptSequence": - """ - Bind a text or token sequence to a tokenizer so that it can be - lazily converted into the other format on demand. - """ - return _BoundPromptSequence( - tokenizer=tokenizer, - _text=seq if isinstance(seq, str) else None, - _token_ids=seq if isinstance(seq, list) else None, - ) - - -_T = TypeVar("_T") _S = TypeVar("_S", str, list[int]) +_PromptSeq = Union[str, list[int]] @dataclass -class PromptReplacement(Generic[_S, _T]): - target: _S - """The text or token sequence to find and replace.""" +class PromptReplacement: + modality: str + """The modality for which the replacement is made""" - repl_unit: _S - """ - The unit making up the replacement text or token sequence. - - See :code:`repl_count` for more details. - """ + target: _PromptSeq + """The text or token sequence to find and replace.""" - repl_count: Union[Callable[[list[_T], BatchFeature, int], int], int] + replacement: Union[Callable[[int], _PromptSeq], + _PromptSeq] = field(repr=False) """ - Given the original multi-modal items for this modality, HF-processed data, - and index of the processed item, output the number of repetitions of - :code:`repl_unit` to build up the replacement text or token sequence. + Given the index of the processed item within :attr:`modality`, output the + replacement text or token sequence. - For convenience, you can pass in an integer if the number of repetitions is - a constant. + For convenience, you can pass in the replacement instead of a function + if it does not depend on the input. 
""" - def __repr__(self) -> str: - return (f"{type(self).__name__}(target={self.target!r}, " - f"repl_unit={self.repl_unit!r})") - - def bind( - self, - modality: str, - tokenizer: AnyTokenizer, - ) -> "_BoundPromptReplacement[_T]": + def bind(self, tokenizer: AnyTokenizer) -> "_BoundPromptReplacement": return _BoundPromptReplacement( - modality=modality, - target=bind_prompt_sequence(self.target, tokenizer), - repl_unit=bind_prompt_sequence(self.repl_unit, tokenizer), - repl_count=self.repl_count, + tokenizer=tokenizer, + modality=self.modality, + _target=self.target, + _replacement=self.replacement, ) -@dataclass -class ModalityProcessingMetadata(Generic[_T]): - prompt_repls: Sequence[Union[PromptReplacement[str, _T], - PromptReplacement[list[int], _T]]] - """ - Defines each text or token sequence to replace in the HF-processed prompt. - - This is skipped if the HF-processed prompt is found to already contain - the replacement prompts. - """ - - -class MultiModalProcessingMetadataBuiltins(TypedDict, total=False): - """Type annotations for modality types predefined by vLLM.""" - - image: ModalityProcessingMetadata[ImageItem] - video: ModalityProcessingMetadata[VideoItem] - audio: ModalityProcessingMetadata[AudioItem] - - -MultiModalProcessingMetadata: TypeAlias = \ - Mapping[str, ModalityProcessingMetadata[Any]] -""" -A dictionary containing an entry for each modality type to process. - -Note: - This dictionary also accepts modality keys defined outside - :class:`MultiModalProcessingMetadataBuiltins` as long as a customized plugin - is registered through the :class:`~vllm.multimodal.MULTIMODAL_REGISTRY`. - Read more on that :ref:`here `. -""" - - def _encode( tokenizer: AnyTokenizer, text: str, @@ -184,7 +128,8 @@ def full_groupby_modality(values: Iterable[_M]) -> ItemsView[str, list[_M]]: @dataclass class _BoundPromptSequence: - tokenizer: AnyTokenizer + tokenizer: AnyTokenizer = field(repr=False) + _text: Optional[str] _token_ids: Optional[list[int]] @@ -209,108 +154,105 @@ def token_ids(self) -> list[int]: return self._token_ids - def __repr__(self) -> str: - return (f"{type(self).__name__}(_text={self._text!r}, " - f"_token_ids={self._token_ids!r})") - @dataclass -class _BoundPromptReplacement(Generic[_T]): +class _BoundPromptReplacement: + tokenizer: AnyTokenizer = field(repr=False) modality: str - target: _BoundPromptSequence - repl_unit: _BoundPromptSequence - repl_count: Union[Callable[[list[_T], BatchFeature, int], int], int] - def get_count( - self, - mm_items: list[_T], - hf_inputs: BatchFeature, - item_idx: int, - ) -> int: - repl_count = self.repl_count - if isinstance(repl_count, int): - return repl_count + _target: _PromptSeq + _replacement: Union[Callable[[int], _PromptSeq], + _PromptSeq] = field(repr=False) - return repl_count(mm_items, hf_inputs, item_idx) + def __post_init__(self) -> None: + self._replacement_cache = dict[int, _BoundPromptSequence]() + @property + def target(self) -> _BoundPromptSequence: + target = self._target -def to_multi_format(data: MultiModalDataDict) -> dict[str, list[Any]]: - """ - Convert a :class:`MultiModalDataDict` containing single data items - to a :class:`MultiModalMultiDataDict` containing multiple data items - per entry. 
- """ - multi_data = dict[str, list[Any]]() + return _BoundPromptSequence( + tokenizer=self.tokenizer, + _text=target if isinstance(target, str) else None, + _token_ids=target if isinstance(target, list) else None, + ) - for k, v in data.items(): - # yapf: disable - if k == "video": - # Special case since even a single item can be a list - multi_data[k] = v if is_list_of(v, list) else [v] # type: ignore[index] - elif k in ("image", "audio"): - multi_data[k] = v if isinstance(v, list) else [v] # type: ignore[index] + def get_replacement(self, item_idx: int) -> _BoundPromptSequence: + replacement = self._replacement + if callable(replacement): + cache_key = item_idx + if cache_key in self._replacement_cache: + return self._replacement_cache[cache_key] + + replacement = replacement(item_idx) else: - multi_data[k] = v if isinstance(v, list) else [v] # type: ignore[index] - # yapf: enable + cache_key = None - return multi_data + bound_replacement = _BoundPromptSequence( + tokenizer=self.tokenizer, + _text=replacement if isinstance(replacement, str) else None, + _token_ids=replacement if isinstance(replacement, list) else None, + ) + if cache_key is not None: + self._replacement_cache[cache_key] = bound_replacement -class _TokenRun(NamedTuple): - token_id: int + return bound_replacement - start_idx: int - length: int + +class ImageSize(NamedTuple): + width: int + height: int -def iter_token_runs(token_ids: list[int]) -> Iterable[_TokenRun]: +class MultiModalDataItems(UserDict[str, list[Any]]): """ - Yield the starting index and length of each run of tokens that are the same. + As :class:`MultiModalDataDict`, but normalized such that each entry + corresponds to a list. """ - start_idx = 0 - for token_id, it in groupby(token_ids): - length = sum(1 for _ in it) - yield _TokenRun(token_id=token_id, start_idx=start_idx, length=length) + @property + def image(self) -> list[ImageItem]: + return self["image"] - start_idx += length + @property + def video(self) -> list[VideoItem]: + return self["video"] + @property + def audio(self) -> list[AudioItem]: + return self["audio"] -class _PlaceholderInfo(NamedTuple): - modality: str - offset: int - length: int + def get_image_size(self, item_idx: int) -> ImageSize: + image = self.image[item_idx] - def to_range(self) -> PlaceholderRange: - return PlaceholderRange(offset=self.offset, length=self.length) + if isinstance(image, Image): + return ImageSize(*image.size) + if isinstance(image, (np.ndarray, torch.Tensor)): + _, h, w = image.shape + return ImageSize(w, h) + assert_never(image) -def iter_placeholders( - prompt_repls: Sequence[_BoundPromptReplacement[Any]], - token_ids: list[int], - *, - min_placeholder_count: int, -) -> Iterable[_PlaceholderInfo]: - """Yield each set of placeholder tokens found in :code:`token_ids`.""" - placeholder_ids_by_modality = { - modality: { - token_id - for prompt_repl in repls - for token_id in prompt_repl.repl_unit.token_ids - } - for modality, repls in full_groupby_modality(prompt_repls) - } - for run_info in iter_token_runs(token_ids): - if run_info.length > min_placeholder_count: - for (modality, - placeholder_ids) in placeholder_ids_by_modality.items(): - if run_info.token_id in placeholder_ids: - yield _PlaceholderInfo( - modality=modality, - offset=run_info.start_idx, - length=run_info.length, - ) +def to_multi_format(data: MultiModalDataDict) -> MultiModalDataItems: + """ + Normalize :class:`MultiModalDataDict` to :class:`MultiModalDataItems`. 
+ """ + multi_data = MultiModalDataItems() + + for k, v in data.items(): + # yapf: disable + if k == "video": + # Special case since even a single item can be a list + multi_data[k] = v if is_list_of(v, list) else [v] # type: ignore[index] + elif k in ("image", "audio"): + multi_data[k] = v if isinstance(v, list) else [v] # type: ignore[index] + else: + multi_data[k] = v if isinstance(v, list) else [v] # type: ignore[index] + # yapf: enable + + return multi_data class _TokenMatch(NamedTuple): @@ -322,22 +264,33 @@ def iter_token_matches( token_ids: list[int], match_ids: list[int], ) -> Iterable[_TokenMatch]: - """Yield each occurrence of :code:`match_ids` in :code:`token_ids`.""" + """ + Yield each occurrence of :code:`match_ids` in :code:`token_ids`. + + Note that empty matches are ignored. + """ + prompt_len = len(token_ids) match_len = len(match_ids) - last_end_idx = 0 - for start_idx in range(len(token_ids) - match_len + 1): - if start_idx < last_end_idx: - continue # Exclude overlapping matches + if match_len == 0: + return + start_idx = 0 + while start_idx < prompt_len - match_len + 1: end_idx = start_idx + match_len + if token_ids[start_idx:end_idx] == match_ids: yield _TokenMatch(start_idx=start_idx, end_idx=end_idx) - last_end_idx = end_idx + + # Exclude overlapping matches + start_idx = end_idx + else: + start_idx += 1 -class _PromptReplacementMatch(ABC, Generic[_T, _S]): - prompt_repl: _BoundPromptReplacement[_T] +@dataclass(repr=False) +class _PromptReplacementMatch(ABC): + prompt_repl: _BoundPromptReplacement @property def modality(self) -> str: @@ -353,23 +306,13 @@ def start_idx(self) -> int: def end_idx(self) -> int: raise NotImplementedError - @abstractmethod - def get_repl( - self, - mm_items: list[_T], - hf_inputs: BatchFeature, - item_idx: int, - ) -> _S: - raise NotImplementedError - def __repr__(self) -> str: return (f"{type(self).__name__}(modality={self.modality!r}, " f"start_idx={self.start_idx!r}, end_idx={self.end_idx!r})") @dataclass(repr=False) -class _PromptReplacementTokenMatch(_PromptReplacementMatch[_T, list[int]]): - prompt_repl: _BoundPromptReplacement[_T] +class _PromptReplacementTokenMatch(_PromptReplacementMatch): match: _TokenMatch @property @@ -380,20 +323,9 @@ def start_idx(self) -> int: def end_idx(self) -> int: return self.match.end_idx - def get_repl( - self, - mm_items: list[_T], - hf_inputs: BatchFeature, - item_idx: int, - ) -> list[int]: - prompt_repl = self.prompt_repl - count = prompt_repl.get_count(mm_items, hf_inputs, item_idx) - return prompt_repl.repl_unit.token_ids * count - @dataclass(repr=False) -class _PromptReplacementTextMatch(_PromptReplacementMatch[_T, str]): - prompt_repl: _BoundPromptReplacement[_T] +class _PromptReplacementTextMatch(_PromptReplacementMatch): match: re.Match[str] @property @@ -404,21 +336,27 @@ def start_idx(self) -> int: def end_idx(self) -> int: return self.match.end() - def get_repl( - self, - mm_items: list[_T], - hf_inputs: BatchFeature, - item_idx: int, - ) -> str: - prompt_repl = self.prompt_repl - count = prompt_repl.get_count(mm_items, hf_inputs, item_idx) - return prompt_repl.repl_unit.text * count + +class _PlaceholderInfo(NamedTuple): + modality: str + start_idx: int + replacement: list[int] + + @property + def length(self) -> int: + return len(self.replacement) + + def to_range(self) -> PlaceholderRange: + return PlaceholderRange( + offset=self.start_idx, + length=self.length, + ) def find_token_matches( prompt: list[int], - prompt_repls: Sequence[_BoundPromptReplacement[_T]], -) -> 
list[_PromptReplacementTokenMatch[_T]]: + prompt_repls: Sequence[_BoundPromptReplacement], +) -> list[_PromptReplacementTokenMatch]: """Return each target of :code:`prompt_repls` found in :code:`prompt`.""" return [ _PromptReplacementTokenMatch(prompt_repl, match) @@ -429,8 +367,8 @@ def find_token_matches( def find_text_matches( prompt: str, - prompt_repls: Sequence[_BoundPromptReplacement[_T]], -) -> list[_PromptReplacementTextMatch[_T]]: + prompt_repls: Sequence[_BoundPromptReplacement], +) -> list[_PromptReplacementTextMatch]: """Return each target of :code:`prompt_repls` found in :code:`prompt`.""" return [ _PromptReplacementTextMatch(prompt_repl, match) @@ -440,49 +378,57 @@ def find_text_matches( def _resolve_matches( - prompt: _S, - matches: Sequence[_PromptReplacementMatch[_T, _S]], -) -> list[_PromptReplacementMatch[_T, _S]]: + prompt: _PromptSeq, + matches: Sequence[_PromptReplacementMatch], +) -> list[_PromptReplacementMatch]: """ Resolve :code:`matches` to ensure that there are no overlapping matches, and sort them such that earlier matches take priority over later ones. """ - num_matches_by_idx = np.zeros(len(prompt), dtype=int) + seen_matches: list[Optional[_PromptReplacementMatch]] = [None + ] * len(prompt) + for match in matches: - num_matches_by_idx[match.start_idx:match.end_idx] += 1 + for idx in range(match.start_idx, match.end_idx): + if seen_matches[idx] is not None: + raise ValueError("Found overlapping matches " + f"({seen_matches[idx]} and {match}) " + f"at index={idx} of prompt={prompt}") - duplicate_matches_idxs, = np.nonzero(num_matches_by_idx > 1) - if len(duplicate_matches_idxs) > 0: - raise ValueError("Unable to find a unique replacement " - f"at indices={duplicate_matches_idxs} " - f"of prompt={prompt}") + seen_matches[idx] = match return sorted(matches, key=lambda x: x.start_idx) def _replace_matches( prompt: _S, - matches: Sequence[_PromptReplacementMatch[_T, _S]], - mm_items_by_modality: Mapping[str, list[_T]], - hf_inputs: BatchFeature, + matches: Sequence[_PromptReplacementMatch], + mm_item_counts: Mapping[str, int], ) -> list[_S]: out_seqs = list[_S]() prev_end_idx = 0 - next_idx_by_modality = {modality: 0 for modality in mm_items_by_modality} + next_idx_by_modality = {modality: 0 for modality in mm_item_counts} for match in _resolve_matches(prompt, matches): modality = match.modality - mm_items = mm_items_by_modality[modality] item_idx = next_idx_by_modality[modality] - if item_idx >= len(mm_items): + if item_idx >= mm_item_counts[modality]: continue start_idx = match.start_idx end_idx = match.end_idx - repl_ids = match.get_repl(mm_items, hf_inputs, item_idx) - out_seqs.append(prompt[prev_end_idx:start_idx] + repl_ids) + repl_info = match.prompt_repl + replacement = repl_info.get_replacement(item_idx) + + if isinstance(prompt, str): + repl_seq = replacement.text + out_seqs.append(prompt[prev_end_idx:start_idx] + repl_seq) + else: + repl_seq = replacement.token_ids + out_seqs.append(prompt[prev_end_idx:start_idx] + repl_seq) + prev_end_idx = end_idx next_idx_by_modality[modality] += 1 @@ -493,58 +439,115 @@ def _replace_matches( def replace_token_matches( prompt: list[int], - matches: Sequence[_PromptReplacementMatch[_T, list[int]]], - mm_items_by_modality: Mapping[str, list[_T]], - hf_inputs: BatchFeature, + matches: Sequence[_PromptReplacementTokenMatch], + mm_item_counts: Mapping[str, int], ) -> list[int]: """Apply :code:`prompt_repls` to :code:`prompt`.""" if not matches: return prompt - token_id_seqs = _replace_matches( - prompt, - matches, - 
mm_items_by_modality, - hf_inputs, - ) + token_id_seqs = _replace_matches(prompt, matches, mm_item_counts) return flatten_2d_lists(token_id_seqs) def replace_text_matches( prompt: str, - matches: Sequence[_PromptReplacementMatch[_T, str]], - mm_items_by_modality: Mapping[str, list[_T]], - hf_inputs: BatchFeature, + matches: Sequence[_PromptReplacementTextMatch], + mm_item_counts: Mapping[str, int], ) -> str: """Apply :code:`prompt_repls` to :code:`prompt`.""" if not matches: return prompt - texts = _replace_matches( - prompt, - matches, - mm_items_by_modality, - hf_inputs, - ) + texts = _replace_matches(prompt, matches, mm_item_counts) return "".join(texts) -class MultiModalProcessor: +def _iter_modality_placeholders( + prompt: list[int], + modality: str, + modality_repls: Sequence[_BoundPromptReplacement], + modal_item_count: int, +) -> Iterable[_PlaceholderInfo]: + if modal_item_count == 0: + return + + prompt_len = len(prompt) + item_index = 0 + + start_idx = 0 + while start_idx < prompt_len: + found = False + + for repl_info in modality_repls: + replacement = repl_info.get_replacement(item_index) + repl_tokens = replacement.token_ids + repl_len = len(repl_tokens) + end_idx = start_idx + repl_len + + if repl_len == 0 or end_idx > prompt_len: + continue + + if prompt[start_idx:end_idx] == repl_tokens: + yield _PlaceholderInfo( + modality=modality, + start_idx=start_idx, + replacement=repl_tokens, + ) + + item_index += 1 + if item_index >= modal_item_count: + return + + # Exclude overlapping matches + start_idx = end_idx + found = True + break + + if not found: + start_idx += 1 + + +def iter_placeholders( + prompt_repls: Sequence[_BoundPromptReplacement], + prompt: list[int], + mm_item_counts: Mapping[str, int], +) -> Iterable[_PlaceholderInfo]: """ - Helper class to process multi-modal inputs to be used in vLLM. + Yield each set of placeholder tokens found in :code:`prompt`. + + Note that empty matches are ignored. """ + repls_by_modality = dict(full_groupby_modality(prompt_repls)) + + for modality, modal_item_count in mm_item_counts.items(): + if modality in repls_by_modality: + yield from _iter_modality_placeholders( + prompt, + modality, + repls_by_modality[modality], + modal_item_count, + ) - def __init__( - self, - ctx: InputProcessingContext, - metadata: MultiModalProcessingMetadata, - ) -> None: + +class ProcessorInputs(NamedTuple): + """Keyword arguments to :meth:`BaseMultiModalProcessor`""" + prompt_text: str + mm_data: MultiModalDataDict + mm_processor_kwargs: Mapping[str, object] + + +class BaseMultiModalProcessor(ABC): + """ + Abstract base class to process multi-modal inputs to be used in vLLM. + """ + + def __init__(self, ctx: InputProcessingContext) -> None: super().__init__() self.ctx = ctx - self.metadata = metadata def __call__( self, @@ -554,22 +557,65 @@ def __call__( ) -> MultiModalInputsV2: return self.apply(prompt, mm_data, mm_processor_kwargs) + def _get_hf_processor(self) -> ProcessorMixin: + """ + Subclasses can add keyword arguments to this method to accept + additional kwargs from model config or user inputs. + """ + return self.ctx.get_hf_processor() + + def _get_tokenizer(self) -> AnyTokenizer: + return self.ctx.tokenizer + + @abstractmethod + def _get_prompt_replacements( + self, + mm_items: MultiModalDataItems, + hf_inputs: BatchFeature, + mm_processor_kwargs: Mapping[str, object], + ) -> list[PromptReplacement]: + """ + Given the original multi-modal items for this modality + and HF-processed data, output the replacements to perform. 
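+
+        A hypothetical override for a model with a single image placeholder
+        token might look like this (``image_token_index`` and the patch
+        count helper are illustrative):
+
+        .. code-block:: python
+
+            def _get_prompt_replacements(self, mm_items, hf_inputs,
+                                         mm_processor_kwargs):
+                image_token_id = self.ctx.get_hf_config().image_token_index
+
+                def get_replacement(item_idx: int) -> list[int]:
+                    # One placeholder token per patch of this image item.
+                    return [image_token_id] * self._num_patches(item_idx)
+
+                return [
+                    PromptReplacement(
+                        modality="image",
+                        target=[image_token_id],
+                        replacement=get_replacement,
+                    )
+                ]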
+
+        Note:
+            Even when the HF processor already performs replacement for us,
+            we still use this replacement information to determine
+            the placeholder token positions for each multi-modal item.
+        """
+        raise NotImplementedError
+
     def _find_placeholders(
         self,
-        all_prompt_repls: Sequence[_BoundPromptReplacement[Any]],
+        all_prompt_repls: Sequence[_BoundPromptReplacement],
         new_token_ids: list[int],
-        *,
-        # To avoid false positives from multi-input when detecting
-        # whether placeholder tokens have been inserted, in case
-        # the target sequence is a subset of the replacement tokens
-        min_placeholder_count: int = 16,
+        mm_item_counts: Mapping[str, int],
     ) -> list[_PlaceholderInfo]:
         return list(
-            iter_placeholders(
-                all_prompt_repls,
-                new_token_ids,
-                min_placeholder_count=min_placeholder_count,
-            ))
+            iter_placeholders(all_prompt_repls, new_token_ids, mm_item_counts))
+
+    def _get_processor_data(
+        self,
+        mm_data: MultiModalDataDict,
+    ) -> tuple[dict[str, Any], dict[str, Any]]:
+        processor_data = dict[str, Any]()
+        passthrough_data = dict[str, Any]()
+        for k, v in mm_data.items():
+            # TODO: Make a separate modality for embedding inputs
+            # to avoid confusion
+            if k in ("image", "video", "audio"):
+                if isinstance(v, torch.Tensor) and v.ndim == 3:
+                    # Pass through embedding inputs (single)
+                    passthrough_data[f"{k}_embeds"] = [v]
+                elif is_list_of(v, torch.Tensor) and v[0].ndim == 2:
+                    # Pass through embedding inputs (multi)
+                    passthrough_data[f"{k}_embeds"] = v
+                else:
+                    # Map keys to plural form, e.g.: image -> images
+                    processor_data[f"{k}s"] = v
+            else:
+                processor_data[k] = v
+        return processor_data, passthrough_data
 
     def _apply_hf_processor(
         self,
@@ -577,36 +623,52 @@
         mm_data: MultiModalDataDict,
         mm_processor_kwargs: Mapping[str, object],
     ) -> BatchFeature:
-        hf_processor = self.ctx.get_hf_processor()
+        # some mm_processor_kwargs may be used in processor initialization
+        # instead of processor call
+        hf_processor = self._get_hf_processor(**mm_processor_kwargs)
 
-        return hf_processor(
-            text=prompt,  # type: ignore
-            **mm_data,
-            **mm_processor_kwargs,
+        processor_data, passthrough_data = self._get_processor_data(mm_data)
+
+        assert callable(hf_processor)
+        mm_processor_kwargs = self.ctx.resolve_hf_processor_call_kwargs(
+            hf_processor,
+            mm_processor_kwargs,
         )
 
+        try:
+            hf_inputs = hf_processor(
+                text=prompt,  # type: ignore
+                **processor_data,
+                **mm_processor_kwargs,
+                return_tensors="pt",
+            )
+        except Exception as exc:
+            data = dict(text=prompt, **processor_data)
+
+            raise RuntimeError(
+                f"Failed to apply {type(hf_processor).__name__} "
+                f"on data={data} with kwargs={mm_processor_kwargs}") from exc
+
+        hf_inputs.update(passthrough_data)
+
+        return hf_inputs
+
     def _bind_prompt_replacements(
         self,
-        mm_data: MultiModalDataDict,
-    ) -> list[_BoundPromptReplacement[Any]]:
-        tokenizer = self.ctx.tokenizer
+        prompt_repls: list[PromptReplacement],
+    ) -> list[_BoundPromptReplacement]:
+        tokenizer = self._get_tokenizer()
 
-        return [
-            prompt_repl.bind(modality, tokenizer)
-            for modality, metadata in self.metadata.items()
-            if modality in mm_data for prompt_repl in metadata.prompt_repls
-        ]
+        return [prompt_repl.bind(tokenizer) for prompt_repl in prompt_repls]
 
     def _apply_prompt_replacements(
         self,
-        mm_data: MultiModalDataDict,
-        hf_inputs: BatchFeature,
         token_ids: list[int],
-        prompt_repls: Sequence[_BoundPromptReplacement[Any]],
+        prompt_repls: Sequence[_BoundPromptReplacement],
+        mm_item_counts: Mapping[str, int],
    ) -> tuple[list[int], str, list[_PlaceholderInfo]]:
-        tokenizer = 
self.ctx.tokenizer + tokenizer = self._get_tokenizer() - mm_items = to_multi_format(mm_data) token_matches = find_token_matches(token_ids, prompt_repls) # If the search text does not represent a special token, @@ -620,14 +682,13 @@ def _apply_prompt_replacements( # of the search text in the prompt, we instead perform string # replacement on the decoded token IDs, then encode them back. if all( - len(matches) >= len(mm_data[modality]) + len(matches) >= mm_item_counts[modality] for modality, matches in full_groupby_modality(token_matches) ): # yapf: disable token_ids = replace_token_matches( token_ids, token_matches, - mm_items, - hf_inputs, + mm_item_counts, ) text = _decode(tokenizer, token_ids) @@ -639,23 +700,14 @@ def _apply_prompt_replacements( text = replace_text_matches( text, text_matches, - mm_items, - hf_inputs, + mm_item_counts, ) token_ids = _encode(tokenizer, text) matched_repls = [match.prompt_repl for match in text_matches] - placeholders = self._find_placeholders(matched_repls, token_ids) - - # Sanity check - assert len(placeholders) == len(matched_repls), dict( - # Log this information for easier debugging - text=text, - token_ids=token_ids, - placeholders=placeholders, - matched_repls=matched_repls, - ) + placeholders = self._find_placeholders(matched_repls, token_ids, + mm_item_counts) return token_ids, text, placeholders @@ -678,19 +730,24 @@ def apply( 3. Extract information about the placeholder tokens from the processed token IDs. """ - tokenizer = self.ctx.tokenizer + tokenizer = self._get_tokenizer() hf_inputs = self._apply_hf_processor(prompt_text, mm_data, mm_processor_kwargs) prompt_ids, = hf_inputs.pop("input_ids").tolist() mm_kwargs = MultiModalKwargs(hf_inputs) - all_prompt_repls = self._bind_prompt_replacements(mm_data) + mm_items = to_multi_format(mm_data) + prompt_repls = self._get_prompt_replacements(mm_items, hf_inputs, + mm_processor_kwargs) + all_prompt_repls = self._bind_prompt_replacements(prompt_repls) # If HF processor already inserts placeholder tokens, # there is no need for us to insert them + mm_item_counts = {m: len(items) for m, items in mm_items.items()} all_placeholders = self._find_placeholders(all_prompt_repls, - prompt_ids) + prompt_ids, mm_item_counts) + if all_placeholders: prompt_text = _decode(tokenizer, prompt_ids) else: @@ -699,10 +756,9 @@ def apply( prompt_text, all_placeholders, ) = self._apply_prompt_replacements( - mm_data, - hf_inputs, prompt_ids, all_prompt_repls, + mm_item_counts, ) mm_placeholders = { @@ -717,3 +773,62 @@ def apply( mm_kwargs=mm_kwargs, mm_placeholders=mm_placeholders, ) + + @abstractmethod + def _get_dummy_mm_inputs( + self, + mm_counts: Mapping[str, int], + ) -> ProcessorInputs: + """ + Build the multi-modal portion of the input which, after processing, + results in `mm_max_tokens` in :meth:`get_dummy_data`. 
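+
+        A hypothetical implementation for an image-only model (the prompt
+        string and image size are illustrative):
+
+        .. code-block:: python
+
+            def _get_dummy_mm_inputs(self, mm_counts):
+                num_images = mm_counts["image"]
+                image = Image.new("RGB", (1024, 1024), color=0)
+
+                return ProcessorInputs(
+                    prompt_text="<image>" * num_images,
+                    mm_data={"image": [image] * num_images},
+                    mm_processor_kwargs={},
+                )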
+ """ + raise NotImplementedError + + def get_dummy_data( + self, + seq_len: int, + mm_counts: Mapping[str, int], + mm_max_tokens: Mapping[str, int], + ) -> DummyData: + # Avoid circular import + from vllm.sequence import SequenceData + + processor_inputs = self._get_dummy_mm_inputs(mm_counts) + mm_inputs = self.apply(*processor_inputs) + + prompt_token_ids = mm_inputs["prompt_token_ids"] + placeholders_by_modality = mm_inputs["mm_placeholders"] + + total_placeholders_by_modality = dict[str, int]() + for modality, placeholders in placeholders_by_modality.items(): + num_placeholders = sum(item["length"] for item in placeholders) + max_tokens = mm_max_tokens[modality] + + if num_placeholders != max_tokens: + logger.warning( + "The processed dummy data has a total of %d placeholder " + "tokens for the '%s' modality, which is not the expected " + "%d tokens.", num_placeholders, modality, max_tokens) + + total_placeholders_by_modality[modality] = num_placeholders + + total_len = len(prompt_token_ids) + if total_len > seq_len: + logger.warning( + "The context length (%d) of the model is too short " + "to hold the multi-modal embeddings in the worst case " + "(%d tokens in total, out of which %s are reserved for " + "multi-modal embeddings). This may cause certain multi-modal " + "inputs to fail during inference, even when the input text is " + "short. To avoid this, you should increase `max_model_len`, " + "reduce `max_num_seqs`, and/or reduce `mm_counts`.", seq_len, + total_len, total_placeholders_by_modality) + + prompt_token_ids.extend([0] * (seq_len - len(prompt_token_ids))) + + return DummyData( + seq_data=SequenceData.from_seqs(prompt_token_ids), + multi_modal_data=mm_inputs["mm_kwargs"], + multi_modal_placeholders=placeholders_by_modality, + ) diff --git a/vllm/multimodal/registry.py b/vllm/multimodal/registry.py index b992442d3b314..6cd79d414c978 100644 --- a/vllm/multimodal/registry.py +++ b/vllm/multimodal/registry.py @@ -9,12 +9,13 @@ from vllm.inputs import InputProcessingContext from vllm.logger import init_logger from vllm.transformers_utils.tokenizer import AnyTokenizer +from vllm.utils import ClassRegistry from .audio import AudioPlugin from .base import MultiModalInputMapper, MultiModalPlugin, MultiModalTokensCalc from .image import ImagePlugin from .inputs import MultiModalDataDict, MultiModalKwargs, NestedTensors -from .processing import MultiModalProcessor +from .processing import BaseMultiModalProcessor from .video import VideoPlugin if TYPE_CHECKING: @@ -25,7 +26,7 @@ N = TypeVar("N", bound=Type[nn.Module]) MultiModalProcessorFactory: TypeAlias = Callable[[InputProcessingContext], - MultiModalProcessor] + BaseMultiModalProcessor] """ Constructs a :class:`MultiModalProcessor` instance from the context. 
@@ -62,8 +63,8 @@ def __init__( plugins: Sequence[MultiModalPlugin] = DEFAULT_PLUGINS) -> None: self._plugins = {p.get_data_key(): p for p in plugins} - self._processor_factories: Dict[Type[nn.Module], - MultiModalProcessorFactory] = {} + self._processor_factories = ClassRegistry[nn.Module, + MultiModalProcessorFactory]() # This is used for non-multimodal models self._disabled_limits_per_plugin = {k: 0 for k in self._plugins} @@ -199,9 +200,29 @@ def register_max_image_tokens( """ return self.register_max_multimodal_tokens("image", max_mm_tokens) - def get_max_multimodal_tokens(self, model_config: "ModelConfig") -> int: + def get_max_tokens_per_item_by_modality( + self, + model_config: "ModelConfig", + ) -> Mapping[str, int]: """ - Get the maximum number of multi-modal tokens + Get the maximum number of tokens per data item from each modality + for profiling the memory usage of a model. + + Note: + This is currently directly used only in V1. + """ + + return { + key: plugin.get_max_multimodal_tokens(model_config) + for key, plugin in self._plugins.items() + } + + def get_max_tokens_by_modality( + self, + model_config: "ModelConfig", + ) -> Mapping[str, int]: + """ + Get the maximum number of tokens from each modality for profiling the memory usage of a model. See :meth:`MultiModalPlugin.get_max_multimodal_tokens` for more details. @@ -211,9 +232,23 @@ def get_max_multimodal_tokens(self, model_config: "ModelConfig") -> int: """ limits_per_plugin = self._limits_by_model[model_config] - return sum((limits_per_plugin[key] * - plugin.get_max_multimodal_tokens(model_config)) - for key, plugin in self._plugins.items()) + return { + key: limits_per_plugin[key] * max_tokens_per_mm_item + for key, max_tokens_per_mm_item in + self.get_max_tokens_per_item_by_modality(model_config).items() + } + + def get_max_multimodal_tokens(self, model_config: "ModelConfig") -> int: + """ + Get the maximum number of multi-modal tokens + for profiling the memory usage of a model. + + See :meth:`MultiModalPlugin.get_max_multimodal_tokens` for more details. + + Note: + This should be called after :meth:`init_mm_limits_per_prompt`. + """ + return sum(self.get_max_tokens_by_modality(model_config).values()) def init_mm_limits_per_prompt( self, @@ -269,7 +304,8 @@ def register_processor( factory: MultiModalProcessorFactory, ): """ - Register a multi-modal processor to a model class. + Register a multi-modal processor to a model class. The processor + is constructed lazily, hence a factory method should be passed. When the model receives multi-modal data, the provided function is invoked to transform the data into a dictionary of model inputs. @@ -280,9 +316,9 @@ def register_processor( """ def wrapper(model_cls: N) -> N: - if model_cls in self._processor_factories: + if self._processor_factories.contains(model_cls, strict=True): logger.warning( - "Model class %s already has an input mapper " + "Model class %s already has a multi-modal processor " "registered to %s. It is overwritten by the new one.", model_cls, self) @@ -306,7 +342,7 @@ def create_processor( self, model_config: "ModelConfig", tokenizer: AnyTokenizer, - ) -> MultiModalProcessor: + ) -> BaseMultiModalProcessor: """ Create a multi-modal processor for a specific model and tokenizer. 
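+
+        A usage sketch (arguments as validated by the caller):
+
+        .. code-block:: python
+
+            processor = MULTIMODAL_REGISTRY.create_processor(
+                model_config, tokenizer)
+            mm_inputs = processor(prompt_text, mm_data, mm_processor_kwargs)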
""" diff --git a/vllm/multimodal/utils.py b/vllm/multimodal/utils.py index d4333b7519b47..c898ca4e6573e 100644 --- a/vllm/multimodal/utils.py +++ b/vllm/multimodal/utils.py @@ -535,11 +535,13 @@ def repeat_and_pad_placeholder_tokens( return new_prompt, new_token_ids, placeholder_ranges -def consecutive_placeholder_ranges(num_items: int, - item_size: int) -> List[PlaceholderRange]: +def consecutive_placeholder_ranges( + num_items: int, + item_size: int, + initial_offset: int = 0) -> List[PlaceholderRange]: """Returns a list of consecutive PlaceholderRanges of a fixed size""" return [ - PlaceholderRange(offset=i * item_size, length=item_size) - for i in range(num_items) + PlaceholderRange(offset=initial_offset + i * item_size, + length=item_size) for i in range(num_items) ] diff --git a/vllm/outputs.py b/vllm/outputs.py index 2d256803edfe8..2ecdf74ee59b3 100644 --- a/vllm/outputs.py +++ b/vllm/outputs.py @@ -1,9 +1,12 @@ import time from dataclasses import dataclass -from typing import Dict, List, Optional +from typing import Dict, Generic, List, Optional from typing import Sequence as GenericSequence from typing import Union +import torch +from typing_extensions import TypeVar, deprecated + from vllm.lora.request import LoRARequest from vllm.multimodal.inputs import MultiModalPlaceholderDict from vllm.sampling_params import RequestOutputKind @@ -53,18 +56,28 @@ def __repr__(self) -> str: @dataclass -class EmbeddingOutput: - """The output data of one completion output of a request. +class PoolingOutput: + """The output data of one pooling output of a request. Args: - embedding: The embedding vector, which is a list of floats. The - length of vector depends on the model as listed in the embedding guide. + data: The extracted hidden states. """ - embedding: List[float] + data: torch.Tensor def __repr__(self) -> str: - return (f"EmbeddingOutput(" - f"embedding={len(self.embedding)})") + return (f"PoolingOutput(data={self.data})") + + def __eq__(self, other: object) -> bool: + return (isinstance(other, self.__class__) and bool( + (self.data == other.data).all())) + + @property + @deprecated("`LLM.encode()` now stores raw outputs in the `data` " + "attribute. To return embeddings, use `LLM.embed()`. " + "To return class probabilities, use `LLM.classify()` " + "and access the `probs` attribute. ") + def embedding(self) -> list[float]: + return self.data.tolist() class RequestOutput: @@ -316,106 +329,178 @@ def __repr__(self) -> str: f"multi_modal_placeholders={self.multi_modal_placeholders})") -class EmbeddingRequestOutput: +_O = TypeVar("_O", default=PoolingOutput) + + +class PoolingRequestOutput(Generic[_O]): """ - The output data of an embedding request to the LLM. + The output data of a pooling request to the LLM. Args: - request_id (str): A unique identifier for the embedding request. - outputs (EmbeddingOutput): The embedding results for the given input. + request_id (str): A unique identifier for the pooling request. + outputs (PoolingOutput): The pooling results for the given input. prompt_token_ids (List[int]): A list of token IDs used in the prompt. - finished (bool): A flag indicating whether the embedding is completed. + finished (bool): A flag indicating whether the pooling is completed. 
""" - def __init__(self, request_id: str, outputs: "EmbeddingOutput", + def __init__(self, request_id: str, outputs: _O, prompt_token_ids: List[int], finished: bool): self.request_id = request_id self.prompt_token_ids = prompt_token_ids self.finished = finished self.outputs = outputs - @classmethod - def from_seq_group(cls, - seq_group: 'SequenceGroup') -> "EmbeddingRequestOutput": - if seq_group.embeddings is None: - raise ValueError( - "Embeddings are missing in seq_group for EmbeddingRequest.") - output = EmbeddingOutput(seq_group.embeddings) + @staticmethod + def from_seq_group(seq_group: SequenceGroup) -> "PoolingRequestOutput": + pooled_data = seq_group.pooled_data + assert pooled_data is not None + + output = PoolingOutput(pooled_data) prompt_token_ids = seq_group.prompt_token_ids finished = seq_group.is_finished() - return cls(seq_group.request_id, output, prompt_token_ids, finished) + return PoolingRequestOutput(seq_group.request_id, output, + prompt_token_ids, finished) def __repr__(self): """ - Returns a string representation of an EmbeddingRequestOutput instance. + Returns a string representation of an PoolingRequestOutput instance. The representation includes the request_id and the number of outputs, - providing a quick overview of the embedding request's results. + providing a quick overview of the pooling request's results. Returns: - str: A string representation of the EmbeddingRequestOutput instance. + str: A string representation of the PoolingRequestOutput instance. """ - return (f"EmbeddingRequestOutput(request_id='{self.request_id}', " - f"outputs={repr(self.outputs)}, " + return (f"{type(self).__name__}(request_id={self.request_id!r}, " + f"outputs={self.outputs!r}, " f"prompt_token_ids={self.prompt_token_ids}, " f"finished={self.finished})") +class RequestOutputFactory: + + @staticmethod + def create(seq_group: SequenceGroup, + seq_id_to_seq_group: Dict[str, SequenceGroupBase], + use_cache: bool = False): + if seq_group.pooled_data is not None: + return PoolingRequestOutput.from_seq_group(seq_group) + else: + return RequestOutput.from_seq_group(seq_group, use_cache, + seq_id_to_seq_group) + + @dataclass -class ScoreOutput: - """The output data of one completion output of a request. +class EmbeddingOutput: + """The output data of one embedding output of a request. Args: - score: The score, which is a list of floats. - index: The correspondent text index of the score. + embedding: The embedding vector, which is a list of floats. + Its length depends on the hidden dimension of the model. 
""" - index: int - score: List[float] + embedding: list[float] + + @staticmethod + def from_base(pooling_output: PoolingOutput): + pooled_data = pooling_output.data + if pooled_data.ndim != 1: + raise ValueError("pooled_data should be a 1-D embedding vector") + + return EmbeddingOutput(pooled_data.tolist()) + + @property + def hidden_size(self) -> int: + return len(self.embedding) def __repr__(self) -> str: - return (f"ScoreOutput(" - f"score={self.score}), " - f"index={self.index})") + return f"EmbeddingOutput(hidden_size={self.hidden_size})" + + +class EmbeddingRequestOutput(PoolingRequestOutput[EmbeddingOutput]): + + @staticmethod + def from_base(request_output: PoolingRequestOutput): + return EmbeddingRequestOutput( + request_id=request_output.request_id, + outputs=EmbeddingOutput.from_base(request_output.outputs), + prompt_token_ids=request_output.prompt_token_ids, + finished=request_output.finished, + ) -class ScoreRequestOutput: +@dataclass +class ClassificationOutput: + """The output data of one classification output of a request. + + Args: + probs: The probability vector, which is a list of floats. + Its length depends on the number of classes. """ - The output data of an score request to the LLM. + probs: list[float] + + @staticmethod + def from_base(pooling_output: PoolingOutput): + pooled_data = pooling_output.data + if pooled_data.ndim != 1: + raise ValueError("pooled_data should be a 1-D probability vector") + + return ClassificationOutput(pooled_data.tolist()) + + @property + def num_classes(self) -> int: + return len(self.probs) + + def __repr__(self) -> str: + return f"ClassificationOutput(num_classes={self.num_classes})" + + +class ClassificationRequestOutput(PoolingRequestOutput[ClassificationOutput]): + + @staticmethod + def from_base(request_output: PoolingRequestOutput): + return ClassificationRequestOutput( + request_id=request_output.request_id, + outputs=ClassificationOutput.from_base(request_output.outputs), + prompt_token_ids=request_output.prompt_token_ids, + finished=request_output.finished, + ) + + +@dataclass +class ScoringOutput: + """The output data of one scoring output of a request. Args: - request_id (str): A unique identifier for the score request. - outputs (score): The embedding results for the given input. + score: The similarity score, which is a scalar value. """ + score: float - def __init__(self, request_id: str, outputs: "ScoreOutput"): - self.request_id = request_id - self.outputs = outputs + @staticmethod + def from_base(pooling_output: PoolingOutput): + pooled_data = pooling_output.data + if pooled_data.ndim != 0: + raise ValueError("pooled_data should be a scalar score") - def __repr__(self): - """ - Returns a string representation of an ScoreRequestOutput instance. + return ScoringOutput(pooled_data.item()) - The representation includes the request_id and the number of outputs, - providing a quick overview of the embedding request's results. + def __repr__(self) -> str: + return f"ScoringOutput(score={self.score})" - Returns: - str: A string representation of the ScoreRequestOutput instance. - """ - return (f"ScoreRequestOutput(request_id='{self.request_id}', " - f"outputs={repr(self.outputs)}") + @property + @deprecated("`LLM.score()` now returns scalar scores. " + "Please access it via the `score` attribute. 
") + def embedding(self) -> list[float]: + return [self.score] -class RequestOutputFactory: +class ScoringRequestOutput(PoolingRequestOutput[ScoringOutput]): @staticmethod - def create(seq_group: SequenceGroup, - seq_id_to_seq_group: Dict[str, SequenceGroupBase], - use_cache: bool = False): - # Determine the type based on a condition, for example: - if hasattr(seq_group, - 'embeddings') and seq_group.embeddings is not None: - return EmbeddingRequestOutput.from_seq_group(seq_group) - else: - return RequestOutput.from_seq_group(seq_group, use_cache, - seq_id_to_seq_group) + def from_base(request_output: PoolingRequestOutput): + return ScoringRequestOutput( + request_id=request_output.request_id, + outputs=ScoringOutput.from_base(request_output.outputs), + prompt_token_ids=request_output.prompt_token_ids, + finished=request_output.finished, + ) diff --git a/vllm/platforms/__init__.py b/vllm/platforms/__init__.py index 7cb8ac4b0a1e0..419237c252ffd 100644 --- a/vllm/platforms/__init__.py +++ b/vllm/platforms/__init__.py @@ -1,5 +1,5 @@ from .interface import _Backend # noqa: F401 -from .interface import Platform, PlatformEnum, UnspecifiedPlatform +from .interface import CpuArchEnum, Platform, PlatformEnum, UnspecifiedPlatform current_platform: Platform @@ -120,4 +120,4 @@ def cuda_is_jetson() -> bool: else: current_platform = UnspecifiedPlatform() -__all__ = ['Platform', 'PlatformEnum', 'current_platform'] +__all__ = ['Platform', 'PlatformEnum', 'current_platform', 'CpuArchEnum'] diff --git a/vllm/platforms/cpu.py b/vllm/platforms/cpu.py index 3e22c87f61fac..d95a2b4cd5565 100644 --- a/vllm/platforms/cpu.py +++ b/vllm/platforms/cpu.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional import psutil import torch @@ -19,6 +19,7 @@ class CpuPlatform(Platform): _enum = PlatformEnum.CPU + device_name: str = "cpu" device_type: str = "cpu" dispatch_key: str = "CPU" @@ -36,6 +37,10 @@ def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: def get_device_total_memory(cls, device_id: int = 0) -> int: return psutil.virtual_memory().total + @classmethod + def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: + return False + @classmethod def inference_mode(cls): return torch.no_grad() @@ -45,7 +50,7 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: import vllm.envs as envs from vllm.utils import GiB_bytes model_config = vllm_config.model_config - # Reminder: Please update docs/source/serving/compatibility_matrix.rst + # Reminder: Please update docs/source/usage/compatibility_matrix.rst # If the feature combo become valid if not model_config.enforce_eager: logger.warning( @@ -55,6 +60,9 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: cache_config = vllm_config.cache_config + if cache_config and cache_config.block_size is None: + cache_config.block_size = 16 + kv_cache_space = envs.VLLM_CPU_KVCACHE_SPACE if kv_cache_space >= 0: @@ -93,3 +101,8 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: "vllm.worker.cpu_worker.CPUWorker" else: parallel_config.worker_cls = "vllm.worker.cpu_worker.CPUWorker" + + @classmethod + def is_pin_memory_available(cls) -> bool: + logger.warning("Pin memory is not supported on CPU.") + return False diff --git a/vllm/platforms/cuda.py b/vllm/platforms/cuda.py index 5e9ce551f2332..3c5350b778345 100644 --- a/vllm/platforms/cuda.py +++ b/vllm/platforms/cuda.py @@ -4,7 +4,8 @@ import os from functools import lru_cache, wraps -from typing import 
TYPE_CHECKING, Callable, List, TypeVar +from typing import (TYPE_CHECKING, Callable, List, Optional, Tuple, TypeVar, + Union) import pynvml import torch @@ -12,6 +13,7 @@ # import custom ops, trigger op registration import vllm._C # noqa +import vllm.envs as envs from vllm.logger import init_logger from .interface import DeviceCapability, Platform, PlatformEnum @@ -72,11 +74,14 @@ def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> _R: class CudaPlatformBase(Platform): _enum = PlatformEnum.CUDA + device_name: str = "cuda" device_type: str = "cuda" dispatch_key: str = "CUDA" @classmethod - def get_device_capability(cls, device_id: int = 0) -> DeviceCapability: + def get_device_capability(cls, + device_id: int = 0 + ) -> Optional[DeviceCapability]: raise NotImplementedError @classmethod @@ -87,6 +92,16 @@ def get_device_name(cls, device_id: int = 0) -> str: def get_device_total_memory(cls, device_id: int = 0) -> int: raise NotImplementedError + @classmethod + def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: + if enforce_eager: + logger.warning( + "To see benefits of async output processing, enable CUDA " + "graph. Since, enforce-eager is enabled, async output " + "processor cannot be used") + return False + return True + @classmethod def is_full_nvlink(cls, device_ids: List[int]) -> bool: raise NotImplementedError @@ -99,17 +114,32 @@ def log_warnings(cls): def check_and_update_config(cls, vllm_config: VllmConfig) -> None: parallel_config = vllm_config.parallel_config scheduler_config = vllm_config.scheduler_config + if parallel_config.worker_cls == "auto": if scheduler_config.is_multi_step: - parallel_config.worker_cls = \ - "vllm.worker.multi_step_worker.MultiStepWorker" + if envs.VLLM_USE_V1: + raise NotImplementedError + else: + parallel_config.worker_cls = \ + "vllm.worker.multi_step_worker.MultiStepWorker" elif vllm_config.speculative_config: - parallel_config.worker_cls = \ - "vllm.spec_decode.spec_decode_worker.create_spec_worker" - parallel_config.sd_worker_cls = \ - "vllm.worker.worker.Worker" + if envs.VLLM_USE_V1: + raise NotImplementedError + else: + parallel_config.worker_cls = \ + "vllm.spec_decode.spec_decode_worker.create_spec_worker" + parallel_config.sd_worker_cls = \ + "vllm.worker.worker.Worker" else: - parallel_config.worker_cls = "vllm.worker.worker.Worker" + if envs.VLLM_USE_V1: + parallel_config.worker_cls = \ + "vllm.v1.worker.gpu_worker.Worker" + else: + parallel_config.worker_cls = "vllm.worker.worker.Worker" + + cache_config = vllm_config.cache_config + if cache_config and cache_config.block_size is None: + cache_config.block_size = 16 # NVML utils @@ -121,11 +151,29 @@ class NvmlCudaPlatform(CudaPlatformBase): @classmethod @lru_cache(maxsize=8) @with_nvml_context - def get_device_capability(cls, device_id: int = 0) -> DeviceCapability: - physical_device_id = device_id_to_physical_device_id(device_id) - handle = pynvml.nvmlDeviceGetHandleByIndex(physical_device_id) - major, minor = pynvml.nvmlDeviceGetCudaComputeCapability(handle) - return DeviceCapability(major=major, minor=minor) + def get_device_capability(cls, + device_id: int = 0 + ) -> Optional[DeviceCapability]: + try: + physical_device_id = device_id_to_physical_device_id(device_id) + handle = pynvml.nvmlDeviceGetHandleByIndex(physical_device_id) + major, minor = pynvml.nvmlDeviceGetCudaComputeCapability(handle) + return DeviceCapability(major=major, minor=minor) + except RuntimeError: + return None + + @classmethod + @lru_cache(maxsize=8) + @with_nvml_context + def 
has_device_capability( + cls, + capability: Union[Tuple[int, int], int], + device_id: int = 0, + ) -> bool: + try: + return super().has_device_capability(capability, device_id) + except RuntimeError: + return False @classmethod @lru_cache(maxsize=8) @@ -238,4 +286,4 @@ def is_full_nvlink(cls, physical_device_ids: List[int]) -> bool: if not isinstance(pynvml, _MockModule): CudaPlatform.log_warnings() except ModuleNotFoundError: - CudaPlatform.log_warnings() \ No newline at end of file + CudaPlatform.log_warnings() diff --git a/vllm/platforms/hpu.py b/vllm/platforms/hpu.py index 3071136e43b85..0a44f2b74163a 100644 --- a/vllm/platforms/hpu.py +++ b/vllm/platforms/hpu.py @@ -1,7 +1,9 @@ -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional import torch +from vllm.logger import init_logger + from .interface import Platform, PlatformEnum, _Backend if TYPE_CHECKING: @@ -9,9 +11,12 @@ else: VllmConfig = None +logger = init_logger(__name__) + class HpuPlatform(Platform): _enum = PlatformEnum.HPU + device_name: str = "hpu" device_type: str = "hpu" dispatch_key: str = "HPU" @@ -19,6 +24,10 @@ class HpuPlatform(Platform): def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: return _Backend.HPU_ATTN + @classmethod + def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: + return True + @staticmethod def inference_mode(): return torch.no_grad() @@ -38,3 +47,14 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: parallel_config = vllm_config.parallel_config if parallel_config.worker_cls == "auto": parallel_config.worker_cls = "vllm.worker.hpu_worker.HPUWorker" + + # NOTE(kzawora): default block size for Gaudi should be 128 + # smaller sizes still work, but very inefficiently + cache_config = vllm_config.cache_config + if cache_config and cache_config.block_size is None: + cache_config.block_size = 128 + + @classmethod + def is_pin_memory_available(cls): + logger.warning("Pin memory is not supported on HPU.") + return False diff --git a/vllm/platforms/interface.py b/vllm/platforms/interface.py index 3328665029039..4150b0cdf836a 100644 --- a/vllm/platforms/interface.py +++ b/vllm/platforms/interface.py @@ -1,15 +1,26 @@ import enum +import platform import random +from platform import uname from typing import TYPE_CHECKING, NamedTuple, Optional, Tuple, Union import numpy as np import torch +from vllm.logger import init_logger + if TYPE_CHECKING: from vllm.config import VllmConfig else: VllmConfig = None +logger = init_logger(__name__) + + +def in_wsl() -> bool: + # Reference: https://github.com/microsoft/WSL/issues/4071 + return "microsoft" in " ".join(uname()).lower() + class _Backend(enum.Enum): FLASH_ATTN = enum.auto() @@ -37,6 +48,14 @@ class PlatformEnum(enum.Enum): UNSPECIFIED = enum.auto() +class CpuArchEnum(enum.Enum): + X86 = enum.auto() + ARM = enum.auto() + POWERPC = enum.auto() + OTHER = enum.auto() + UNKNOWN = enum.auto() + + class DeviceCapability(NamedTuple): major: int minor: int @@ -56,11 +75,13 @@ def to_int(self) -> int: class Platform: _enum: PlatformEnum + device_name: str device_type: str # available dispatch keys: # check https://github.com/pytorch/pytorch/blob/313dac6c1ca0fa0cde32477509cce32089f8532a/torchgen/model.py#L134 # noqa # use "CPU" as a fallback for platforms not registered in PyTorch dispatch_key: str = "CPU" + supported_quantization: list[str] = [] def is_cuda(self) -> bool: return self._enum == PlatformEnum.CUDA @@ -136,6 +157,13 @@ def get_device_total_memory(cls, device_id: int = 
0) -> int: """Get the total memory of a device in bytes.""" raise NotImplementedError + @classmethod + def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: + """ + Check if the current platform supports async output. + """ + raise NotImplementedError + @classmethod def inference_mode(cls): """A device-specific wrapper of `torch.inference_mode`. @@ -171,6 +199,45 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: """ pass + @classmethod + def verify_quantization(cls, quant: str) -> None: + """ + Verify whether the quantization is supported by the current platform. + """ + if cls.supported_quantization and \ + quant not in cls.supported_quantization: + raise ValueError( + f"{quant} quantization is currently not supported in " + f"{cls.device_name}.") + + @classmethod + def get_cpu_architecture(cls) -> CpuArchEnum: + """ + Determine the CPU architecture of the current system. + Returns CpuArchEnum indicating the architecture type. + """ + machine = platform.machine().lower() + + if machine in ("x86_64", "amd64", "i386", "i686"): + return CpuArchEnum.X86 + elif machine.startswith("arm") or machine.startswith("aarch"): + return CpuArchEnum.ARM + elif machine.startswith("ppc"): + return CpuArchEnum.POWERPC + + return CpuArchEnum.OTHER if machine else CpuArchEnum.UNKNOWN + + @classmethod + def is_pin_memory_available(cls) -> bool: + """Checks whether pin memory is available on the current platform.""" + if in_wsl(): + # Pinning memory in WSL is not supported. + # https://docs.nvidia.com/cuda/wsl-user-guide/index.html#known-limitations-for-linux-cuda-applications + logger.warning("Using 'pin_memory=False' as WSL is detected. " + "This may slow down the performance.") + return False + return True + class UnspecifiedPlatform(Platform): _enum = PlatformEnum.UNSPECIFIED diff --git a/vllm/platforms/neuron.py b/vllm/platforms/neuron.py index 4c4d778ed3dd4..a4bbbd27c8a89 100644 --- a/vllm/platforms/neuron.py +++ b/vllm/platforms/neuron.py @@ -1,4 +1,6 @@ -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional + +from vllm.logger import init_logger from .interface import Platform, PlatformEnum @@ -7,18 +9,37 @@ else: VllmConfig = None +logger = init_logger(__name__) + class NeuronPlatform(Platform): _enum = PlatformEnum.NEURON + device_name: str = "neuron" device_type: str = "neuron" + supported_quantization: list[str] = ["neuron_quant"] @classmethod def get_device_name(cls, device_id: int = 0) -> str: return "neuron" + @classmethod + def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: + return False + @classmethod def check_and_update_config(cls, vllm_config: VllmConfig) -> None: parallel_config = vllm_config.parallel_config if parallel_config.worker_cls == "auto": parallel_config.worker_cls = \ "vllm.worker.neuron_worker.NeuronWorker" + + cache_config = vllm_config.cache_config + if cache_config: + # neuron needs block_size = max_model_len + vllm_config.cache_config.block_size = \ + vllm_config.model_config.max_model_len + + @classmethod + def is_pin_memory_available(cls) -> bool: + logger.warning("Pin memory is not supported on Neuron.") + return False diff --git a/vllm/platforms/openvino.py b/vllm/platforms/openvino.py index ea5ec7b40b95c..16eb8dc81efc2 100644 --- a/vllm/platforms/openvino.py +++ b/vllm/platforms/openvino.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional import torch @@ -23,6 +23,7 @@ class OpenVinoPlatform(Platform): _enum = PlatformEnum.OPENVINO + 
device_name: str = "openvino" device_type: str = "openvino" dispatch_key: str = "CPU" @@ -33,23 +34,27 @@ def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: return _Backend.OPENVINO @classmethod - def get_device_name(self, device_id: int = 0) -> str: + def get_device_name(cls, device_id: int = 0) -> str: return "openvino" @classmethod - def inference_mode(self): + def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: + return False + + @classmethod + def inference_mode(cls): return torch.inference_mode(mode=True) @classmethod - def is_openvino_cpu(self) -> bool: + def is_openvino_cpu(cls) -> bool: return "CPU" in envs.VLLM_OPENVINO_DEVICE @classmethod - def is_openvino_gpu(self) -> bool: + def is_openvino_gpu(cls) -> bool: return "GPU" in envs.VLLM_OPENVINO_DEVICE @classmethod - def is_pin_memory_available(self) -> bool: + def is_pin_memory_available(cls) -> bool: logger.warning("Pin memory is not supported on OpenViNO.") return False @@ -82,6 +87,9 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: # check and update cache config ov_core = ov.Core() cache_config = vllm_config.cache_config + if cache_config and cache_config.block_size is None: + cache_config.block_size = 16 + if envs.VLLM_OPENVINO_CPU_KV_CACHE_PRECISION == "u8": if not OpenVinoPlatform.is_openvino_cpu(): logger.info("VLLM_OPENVINO_CPU_KV_CACHE_PRECISION is" diff --git a/vllm/platforms/rocm.py b/vllm/platforms/rocm.py index d2f44c3e423e3..7778b565372cb 100644 --- a/vllm/platforms/rocm.py +++ b/vllm/platforms/rocm.py @@ -1,9 +1,10 @@ import os from functools import lru_cache -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional import torch +import vllm.envs as envs from vllm.logger import init_logger from .interface import DeviceCapability, Platform, PlatformEnum, _Backend @@ -35,8 +36,13 @@ class RocmPlatform(Platform): _enum = PlatformEnum.ROCM + device_name: str = "rocm" device_type: str = "cuda" dispatch_key: str = "CUDA" + supported_quantization: list[str] = [ + "awq", "gptq", "fp8", "compressed_tensors", "compressed-tensors", + "fbgemm_fp8", "gguf" + ] @classmethod def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: @@ -66,8 +72,22 @@ def get_device_total_memory(cls, device_id: int = 0) -> int: device_props = torch.cuda.get_device_properties(device_id) return device_props.total_memory + @classmethod + def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: + if enforce_eager: + logger.warning( + "To see benefits of async output processing, enable CUDA " + "graph. 
Since, enforce-eager is enabled, async output " + "processor cannot be used") + return False + return True + @classmethod def check_and_update_config(cls, vllm_config: VllmConfig) -> None: + cache_config = vllm_config.cache_config + if cache_config and cache_config.block_size is None: + cache_config.block_size = 16 + parallel_config = vllm_config.parallel_config scheduler_config = vllm_config.scheduler_config if parallel_config.worker_cls == "auto": @@ -77,5 +97,16 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: elif vllm_config.speculative_config: parallel_config.worker_cls = \ "vllm.spec_decode.spec_decode_worker.create_spec_worker" + parallel_config.sd_worker_cls = \ + "vllm.worker.worker.Worker" else: parallel_config.worker_cls = "vllm.worker.worker.Worker" + + @classmethod + def verify_quantization(cls, quant: str) -> None: + super().verify_quantization(quant) + if quant == "awq" and not envs.VLLM_USE_TRITON_AWQ: + logger.warning( + "Using AWQ quantization with ROCm, but VLLM_USE_TRITON_AWQ" + " is not set, enabling VLLM_USE_TRITON_AWQ.") + envs.VLLM_USE_TRITON_AWQ = True diff --git a/vllm/platforms/tpu.py b/vllm/platforms/tpu.py index 137af57023ea9..77f5c8401424b 100644 --- a/vllm/platforms/tpu.py +++ b/vllm/platforms/tpu.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional import torch @@ -16,8 +16,10 @@ class TpuPlatform(Platform): _enum = PlatformEnum.TPU + device_name: str = "tpu" device_type: str = "tpu" dispatch_key: str = "XLA" + supported_quantization: list[str] = ["tpu_int8"] @classmethod def get_default_attn_backend(cls, selected_backend: _Backend) -> _Backend: @@ -33,6 +35,10 @@ def get_device_name(cls, device_id: int = 0) -> str: def get_device_total_memory(cls, device_id: int = 0) -> int: raise NotImplementedError + @classmethod + def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: + return True + @classmethod def inference_mode(cls): return torch.no_grad() @@ -40,6 +46,11 @@ def inference_mode(cls): @classmethod def check_and_update_config(cls, vllm_config: VllmConfig) -> None: from vllm.config import CompilationLevel + + cache_config = vllm_config.cache_config + if cache_config and cache_config.block_size is None: + cache_config.block_size = 16 + compilation_config = vllm_config.compilation_config if compilation_config.level == CompilationLevel.NO_COMPILATION: # TPU does not support NO_COMPILATION diff --git a/vllm/platforms/xpu.py b/vllm/platforms/xpu.py index 69388a8e0f27c..78e17c2afec65 100644 --- a/vllm/platforms/xpu.py +++ b/vllm/platforms/xpu.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional import torch @@ -16,6 +16,7 @@ class XPUPlatform(Platform): _enum = PlatformEnum.XPU + device_name: str = "xpu" device_type: str = "xpu" dispatch_key: str = "XPU" @@ -40,12 +41,20 @@ def get_device_total_memory(cls, device_id: int = 0) -> int: device_props = torch.xpu.get_device_properties(device_id) return device_props.total_memory + @classmethod + def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: + return True + @staticmethod def inference_mode(): return torch.no_grad() @classmethod def check_and_update_config(cls, vllm_config: VllmConfig) -> None: + cache_config = vllm_config.cache_config + if cache_config and cache_config.block_size is None: + cache_config.block_size = 16 + # check and update model config model_config = vllm_config.model_config if model_config.dtype == torch.bfloat16: @@ -73,3 +82,8 @@ def 
diff --git a/vllm/platforms/xpu.py b/vllm/platforms/xpu.py index 69388a8e0f27c..78e17c2afec65 100644 --- a/vllm/platforms/xpu.py +++ b/vllm/platforms/xpu.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional import torch @@ -16,6 +16,7 @@ class XPUPlatform(Platform): _enum = PlatformEnum.XPU + device_name: str = "xpu" device_type: str = "xpu" dispatch_key: str = "XPU" @@ -40,12 +41,20 @@ def get_device_total_memory(cls, device_id: int = 0) -> int: device_props = torch.xpu.get_device_properties(device_id) return device_props.total_memory + @classmethod + def is_async_output_supported(cls, enforce_eager: Optional[bool]) -> bool: + return True + @staticmethod def inference_mode(): return torch.no_grad() @classmethod def check_and_update_config(cls, vllm_config: VllmConfig) -> None: + cache_config = vllm_config.cache_config + if cache_config and cache_config.block_size is None: + cache_config.block_size = 16 + # check and update model config model_config = vllm_config.model_config if model_config.dtype == torch.bfloat16: @@ -73,3 +82,8 @@ def check_and_update_config(cls, vllm_config: VllmConfig) -> None: parallel_config.distributed_executor_backend = "ray" if parallel_config.worker_cls == "auto": parallel_config.worker_cls = "vllm.worker.xpu_worker.XPUWorker" + + @classmethod + def is_pin_memory_available(cls): + logger.warning("Pin memory is not supported on XPU.") + return False diff --git a/vllm/plugins/__init__.py b/vllm/plugins/__init__.py index 3c64726ca3344..17f604ea0e202 100644 --- a/vllm/plugins/__init__.py +++ b/vllm/plugins/__init__.py @@ -4,6 +4,7 @@ import torch import vllm.envs as envs +from vllm.platforms import current_platform logger = logging.getLogger(__name__) @@ -25,6 +26,23 @@ def load_general_plugins(): os.environ['TORCHINDUCTOR_COMPILE_THREADS'] = '1' # see https://github.com/vllm-project/vllm/issues/10619 torch._inductor.config.compile_threads = 1 + if current_platform.is_xpu(): + # see https://github.com/pytorch/pytorch/blob/8cada5cbe5450e17c26fb8b358116785324537b2/torch/_dynamo/config.py#L158 # noqa + os.environ['TORCH_COMPILE_DISABLE'] = 'True' + if current_platform.is_hpu(): + # NOTE(kzawora): PT HPU lazy backend (PT_HPU_LAZY_MODE = 1) + # does not support torch.compile + # Eager backend (PT_HPU_LAZY_MODE = 0) must be selected for + # torch.compile support + is_lazy = os.environ.get('PT_HPU_LAZY_MODE', '1') == '1' + if is_lazy: + # see https://github.com/pytorch/pytorch/blob/43c5f59/torch/_dynamo/config.py#L158 + torch._dynamo.config.disable = True + # NOTE(kzawora) multi-HPU inference with HPUGraphs (lazy-only) + # requires enabling lazy collectives + # see https://docs.habana.ai/en/latest/PyTorch/Inference_on_PyTorch/Inference_Using_HPU_Graphs.html # noqa: E501 + os.environ['PT_HPU_ENABLE_LAZY_COLLECTIVES'] = 'true' + global plugins_loaded if plugins_loaded: return @@ -39,7 +57,7 @@ def load_general_plugins(): discovered_plugins = entry_points(group='vllm.general_plugins') if len(discovered_plugins) == 0: - logger.info("No plugins found.") + logger.debug("No plugins found.") return logger.info("Available plugins:") for plugin in discovered_plugins: diff --git a/vllm/profiler/layerwise_profile.py b/vllm/profiler/layerwise_profile.py index 9d9f427e807f6..33babfebdca1e 100644 --- a/vllm/profiler/layerwise_profile.py +++ b/vllm/profiler/layerwise_profile.py @@ -72,6 +72,9 @@ class LayerwiseProfileResults(profile): _model_stats_tree: List[_StatsTreeNode] = field(init=False) _summary_stats_tree: List[_StatsTreeNode] = field(init=False) + # profile metadata + num_running_seqs: Optional[int] = None + def __post_init__(self): self._build_correlation_map() self._build_module_tree() @@ -127,6 +130,9 @@ def export_summary_stats_table_csv(self, filename: str): def convert_stats_to_dict(self) -> str: return { + "metadata": { + "num_running_seqs": self.num_running_seqs + }, "summary_stats": self._convert_stats_tree_to_dict(self._summary_stats_tree), "model_stats": @@ -338,7 +344,15 @@ def df_traversal(node: _StatsTreeNode, curr_json_list: List[Dict]): class layerwise_profile(profile): - def __init__(self): + def __init__(self, num_running_seqs: Optional[int] = None): + """ + layerwise profile constructor. + + Args: + num_running_seqs (Optional[int], optional): When given, + num_running_seqs will be passed to LayerwiseProfileResults for metadata + update. Defaults to None.
+ """ super().__init__( activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], record_shapes=True, @@ -346,9 +360,13 @@ def __init__(self): with_modules=True, experimental_config=_ExperimentalConfig(verbose=True)) + self.num_running_seqs = num_running_seqs + def __enter__(self): return super().__enter__() def __exit__(self, exc_type, exc_val, exc_tb): super().__exit__(exc_type, exc_val, exc_tb) - self.results = LayerwiseProfileResults(self.profiler.kineto_results) + self.results = LayerwiseProfileResults( + self.profiler.kineto_results, + num_running_seqs=self.num_running_seqs) diff --git a/vllm/sampling_params.py b/vllm/sampling_params.py index 5c6df5aaf5446..fc77f3ca529b2 100644 --- a/vllm/sampling_params.py +++ b/vllm/sampling_params.py @@ -293,8 +293,9 @@ def __post_init__(self) -> None: raise ValueError( f"best_of must be greater than or equal to n, " f"got n={self.n} and best_of={self.best_of}.") - self._real_n = self.n - self.n = self.best_of + if not self._real_n: + self._real_n = self.n + self.n = self.best_of if 0 < self.temperature < _MAX_TEMP: logger.warning( diff --git a/vllm/sequence.py b/vllm/sequence.py index 669124319c4f4..cc3d96fc93a79 100644 --- a/vllm/sequence.py +++ b/vllm/sequence.py @@ -527,6 +527,19 @@ def hash_of_block(self, logical_idx: int) -> int: hashed_tokens = self.data.get_prefix_token_ids(num_tokens) return hash((hashed_tokens, self.lora_int_id)) + def extra_hash(self) -> Optional[int]: + """ + This function computes an extra hash for a sequence, specifically + designed for prefix caching mode. The final sequence hash is determined + by applying token_ids from the sequence's blocks. + """ + if self.prompt_adapter_id == 0 and self.lora_int_id == 0: + return None + + # NOTE: If there are additional factors influencing the block aside from + # token_ids, include them as input parameters to the hash. + return hash((self.prompt_adapter_id, self.lora_int_id)) + def num_hashed_tokens_of_block(self, logical_idx: int): return logical_idx * self.block_size + self.block_size @@ -617,10 +630,9 @@ class SequenceGroup: sampling_params: The sampling parameters used to generate the outputs. arrival_time: The arrival time of the request. lora_request: LoRA request. - embeddings: The embeddings vectors of the prompt of the sequence group - for an embedding model. - pooling_params: The pooling parameters used to generate the pooling - for an embedding model. + pooling_params: The parameters used to generate the pooler + for a pooling model. + pooled_data: The extracted hidden states from a pooling model. encoder_seq: Optional, the single encoder sequence. Should be None unless you are working with an encoder/decoder model. trace_headers: OpenTelemetry trace headers. 
@@ -635,8 +647,8 @@ def __init__( arrival_time: float, sampling_params: Optional[SamplingParams] = None, lora_request: Optional[LoRARequest] = None, - embeddings: Optional[List[float]] = None, pooling_params: Optional[PoolingParams] = None, + pooled_data: Optional[torch.Tensor] = None, encoder_seq: Optional[Sequence] = None, trace_headers: Optional[Mapping[str, str]] = None, prompt_adapter_request: Optional[PromptAdapterRequest] = None, @@ -658,8 +670,8 @@ def __init__( self.lora_request = lora_request self.prompt_logprobs: Optional[PromptLogprobs] = None self.state = SequenceGroupState() - self.embeddings = embeddings self.pooling_params = pooling_params + self.pooled_data = pooled_data self.prompt_adapter_request = prompt_adapter_request self.encoder_seq = encoder_seq self.trace_headers = trace_headers @@ -1033,8 +1045,8 @@ class CompletionSequenceGroupOutput( msgspec.Struct, omit_defaults=True, # type: ignore[call-arg] array_like=True): # type: ignore[call-arg] - __metaclass__ = SequenceGroupOutput """The model output associated with a completion sequence group.""" + __metaclass__ = SequenceGroupOutput samples: List[SequenceOutput] # Prompt logprob for each prompt query token. prompt_logprobs: Optional[PromptLogprobs] @@ -1050,23 +1062,24 @@ def __eq__(self, other: object) -> bool: and self.prompt_logprobs == other.prompt_logprobs) -class EmbeddingSequenceGroupOutput( +class PoolingSequenceGroupOutput( msgspec.Struct, omit_defaults=True, # type: ignore[call-arg] array_like=True, # type: ignore[call-arg] ): - """The model output associated with an embedding sequence group.""" + """The model output associated with a pooling sequence group.""" __metaclass__ = SequenceGroupOutput - embeddings: List[int] + # Annotated as Any to be compatible with msgspec + # The actual type is in SequenceGroup.pooled_data + data: Any def __repr__(self) -> str: - return (f"EmbeddingSequenceGroupOutput(" - f"embeddings_shape={len(self.embeddings)})") + return f"PoolingSequenceGroupOutput(data={self.data})" def __eq__(self, other: object) -> bool: - if not isinstance(other, EmbeddingSequenceGroupOutput): + if not isinstance(other, PoolingSequenceGroupOutput): raise NotImplementedError() - return self.embeddings == other.embeddings + return self.data == other.data # cannot use msgspec.Struct here because Dynamo does not support it @@ -1085,7 +1098,7 @@ def __getitem__(self, key: Union[str, slice]): elif isinstance(key, slice): return self.__class__({k: v[key] for k, v in self.tensors.items()}) - def __setitem__(self, key: str, value): + def __setitem__(self, key: str, value: torch.Tensor): self.tensors[key] = value def __len__(self): @@ -1102,17 +1115,13 @@ class PoolerOutput( msgspec.Struct, omit_defaults=True, # type: ignore[call-arg] array_like=True): # type: ignore[call-arg] - """The output from a pooling operation in the embedding model.""" - outputs: List[EmbeddingSequenceGroupOutput] - - # lazy import to avoid circular import - from vllm.spec_decode.metrics import SpecDecodeWorkerMetrics - spec_decode_worker_metrics: Optional[SpecDecodeWorkerMetrics] = None + """The output from a pooling operation in the pooling model.""" + outputs: List[PoolingSequenceGroupOutput] - def __getitem__(self, idx: int) -> EmbeddingSequenceGroupOutput: + def __getitem__(self, idx: int) -> PoolingSequenceGroupOutput: return self.outputs[idx] - def __setitem__(self, idx: int, value): + def __setitem__(self, idx: int, value: PoolingSequenceGroupOutput): self.outputs[idx] = value def __len__(self): @@ -1385,8 +1394,8 @@ def add_request(request_id: str, engine, params, **kwargs): arrival_time=seq_group.arrival_time, sampling_params=original_params, lora_request=seq_group.lora_request, - embeddings=seq_group.embeddings, pooling_params=seq_group.pooling_params, + pooled_data=seq_group.pooled_data, encoder_seq=seq_group.encoder_seq, trace_headers=seq_group.trace_headers, prompt_adapter_request=seq_group.prompt_adapter_request,
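The rename above widens the pooling payload from a typed embeddings list to an Any-typed data field, which keeps msgspec serialization working for whatever a pooling model returns. A standalone sketch of that trade-off (illustrative class, not the vLLM one):

from typing import Any

import msgspec

class PooledOutSketch(msgspec.Struct, array_like=True, omit_defaults=True):
    data: Any  # an embeddings list, a score, or other pooled payload

buf = msgspec.msgpack.encode(PooledOutSketch(data=[0.1, 0.2]))
decoded = msgspec.msgpack.decode(buf, type=PooledOutSketch)
assert decoded.data == [0.1, 0.2]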
diff --git a/vllm/spec_decode/multi_step_worker.py b/vllm/spec_decode/multi_step_worker.py index d249b37c780e4..676ac5eb3609d 100644 --- a/vllm/spec_decode/multi_step_worker.py +++ b/vllm/spec_decode/multi_step_worker.py @@ -120,6 +120,9 @@ def sampler_output( indices_of_seq_with_bonus_tokens) model_outputs.append(model_output) + # move indices to device to avoid stream sync + indices_of_seq_with_bonus_tokens = torch.tensor( + indices_of_seq_with_bonus_tokens, device=self.device) filtered_model_outputs = self._filter_model_output( model_outputs, indices_of_seq_with_bonus_tokens) return filtered_model_outputs, True @@ -189,7 +192,7 @@ def _expand_execute_model_request( @staticmethod def _filter_model_output( expanded_batch_outputs: List[SamplerOutput], - output_indices_to_retain: List[int]) -> List[SamplerOutput]: + output_indices_to_retain: torch.Tensor) -> List[SamplerOutput]: """ Filters the model output to include only the specified sequence outputs. This method contracts the expanded batch output from the @@ -199,8 +202,8 @@ def _filter_model_output( Args: expanded_batch_output (List[SamplerOutput]): The expanded output batch from the model. - output_indices_to_retain (List[int]): Indices of the model outputs - to retain. + output_indices_to_retain (torch.Tensor): Indices of the model + outputs to retain. Returns: List[SamplerOutput]: A list containing the filtered model diff --git a/vllm/spec_decode/spec_decode_worker.py b/vllm/spec_decode/spec_decode_worker.py index 53634f7b0b366..2689802161987 100644 --- a/vllm/spec_decode/spec_decode_worker.py +++ b/vllm/spec_decode/spec_decode_worker.py @@ -54,6 +54,10 @@ def create_spec_worker(*args, **kwargs) -> "SpecDecodeWorker": speculative_config: SpeculativeConfig = vllm_config.speculative_config assert speculative_config is not None + if vllm_config.parallel_config.pipeline_parallel_size > 1: + raise NotImplementedError("Speculative decoding is currently " + "incompatible with pipeline parallelism") + draft_worker_kwargs = kwargs.copy() kwargs["model_runner_cls"] = TargetModelRunner @@ -104,7 +108,7 @@ def create_spec_worker(*args, **kwargs) -> "SpecDecodeWorker": return spec_decode_worker -# Reminder: Please update docs/source/serving/compatibility_matrix.rst +# Reminder: Please update docs/source/usage/compatibility_matrix.rst # If the feature combo becomes valid class SpecDecodeWorker(LoraNotSupportedWorkerBase): """Worker which implements speculative decoding.
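The multi_step_worker hunk above builds the index tensor once ("move indices to device to avoid stream sync"), so filtering the repeated SamplerOutputs no longer uploads a Python list on every use; each implicit upload is a host-to-device copy that can stall the stream. A minimal illustration of the one-time-transfer idea, falling back to CPU when no GPU is present:

import torch

indices = [0, 2, 5]  # host-side list of sequence indices
device = "cuda" if torch.cuda.is_available() else "cpu"

# One-time transfer: build the index tensor up front, then reuse it for
# every output that needs filtering.
index_tensor = torch.tensor(indices, device=device)

hidden = torch.arange(12, device=device, dtype=torch.float32).reshape(6, 2)
filtered = hidden.index_select(0, index_tensor)
assert filtered.shape == (3, 2)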
diff --git a/vllm/transformers_utils/config.py b/vllm/transformers_utils/config.py index 3da99bcbee9ae..4529cf27ef565 100644 --- a/vllm/transformers_utils/config.py +++ b/vllm/transformers_utils/config.py @@ -1,5 +1,6 @@ import enum import json +import os from pathlib import Path from typing import Any, Dict, Optional, Type, Union @@ -41,6 +42,7 @@ from transformers import AutoConfig MISTRAL_CONFIG_NAME = "params.json" +HF_TOKEN = os.getenv('HF_TOKEN', None) logger = init_logger(__name__) @@ -77,8 +79,8 @@ class ConfigFormat(str, enum.Enum): MISTRAL = "mistral" -def file_or_path_exists(model: Union[str, Path], config_name, revision, - token) -> bool: +def file_or_path_exists(model: Union[str, Path], config_name: str, + revision: Optional[str]) -> bool: if Path(model).exists(): return (Path(model) / config_name).is_file() @@ -93,7 +95,10 @@ def file_or_path_exists(model: Union[str, Path], config_name, revision, # NB: file_exists will only check for the existence of the config file on # hf_hub. This will fail in offline mode. try: - return file_exists(model, config_name, revision=revision, token=token) + return file_exists(model, + config_name, + revision=revision, + token=HF_TOKEN) except huggingface_hub.errors.OfflineModeIsEnabled: # Don't raise in offline mode, all we know is that we don't have this # file cached. @@ -161,7 +166,6 @@ def get_config( revision: Optional[str] = None, code_revision: Optional[str] = None, config_format: ConfigFormat = ConfigFormat.AUTO, - token: Optional[str] = None, **kwargs, ) -> PretrainedConfig: # Separate model folder from file path for GGUF models @@ -173,19 +177,20 @@ def get_config( if config_format == ConfigFormat.AUTO: if is_gguf or file_or_path_exists( - model, HF_CONFIG_NAME, revision=revision, token=token): + model, HF_CONFIG_NAME, revision=revision): config_format = ConfigFormat.HF - elif file_or_path_exists(model, - MISTRAL_CONFIG_NAME, - revision=revision, - token=token): + elif file_or_path_exists(model, MISTRAL_CONFIG_NAME, + revision=revision): config_format = ConfigFormat.MISTRAL else: # If we're in offline mode and found no valid config format, then # raise an offline mode error to indicate to the user that they # don't have files cached and may need to go online. # This is conveniently triggered by calling file_exists(). 
- file_exists(model, HF_CONFIG_NAME, revision=revision, token=token) + file_exists(model, + HF_CONFIG_NAME, + revision=revision, + token=HF_TOKEN) raise ValueError(f"No supported config format found in {model}") @@ -194,7 +199,7 @@ get_config( model, revision=revision, code_revision=code_revision, - token=token, + token=HF_TOKEN, **kwargs, ) @@ -206,7 +211,7 @@ get_config( model, revision=revision, code_revision=code_revision, - token=token, + token=HF_TOKEN, **kwargs, ) else: @@ -216,7 +221,7 @@ get_config( trust_remote_code=trust_remote_code, revision=revision, code_revision=code_revision, - token=token, + token=HF_TOKEN, **kwargs, ) except ValueError as e: @@ -234,7 +239,7 @@ get_config( raise e elif config_format == ConfigFormat.MISTRAL: - config = load_params_config(model, revision, token=token, **kwargs) + config = load_params_config(model, revision, token=HF_TOKEN, **kwargs) else: raise ValueError(f"Unsupported config format: {config_format}") @@ -256,8 +261,7 @@ get_config( def get_hf_file_to_dict(file_name: str, model: Union[str, Path], - revision: Optional[str] = 'main', - token: Optional[str] = None): + revision: Optional[str] = 'main'): """ Downloads a file from the Hugging Face Hub and returns its contents as a dictionary. @@ -266,7 +270,6 @@ def get_hf_file_to_dict(file_name: str, - file_name (str): The name of the file to download. - model (str): The name of the model on the Hugging Face Hub. - revision (str): The specific version of the model. - - token (str): The Hugging Face authentication token. Returns: - config_dict (dict): A dictionary containing @@ -276,8 +279,7 @@ def get_hf_file_to_dict(file_name: str, if file_or_path_exists(model=model, config_name=file_name, - revision=revision, - token=token): + revision=revision): if not file_path.is_file(): try: @@ -296,9 +298,7 @@ def get_hf_file_to_dict(file_name: str, return None -def get_pooling_config(model: str, - revision: Optional[str] = 'main', - token: Optional[str] = None): +def get_pooling_config(model: str, revision: Optional[str] = 'main'): """ This function gets the pooling and normalize config from the model - only applies to @@ -315,8 +315,7 @@ """ modules_file_name = "modules.json" - modules_dict = get_hf_file_to_dict(modules_file_name, model, revision, - token) + modules_dict = get_hf_file_to_dict(modules_file_name, model, revision) if modules_dict is None: return None @@ -332,8 +331,7 @@ if pooling: pooling_file_name = "{}/config.json".format(pooling["path"]) - pooling_dict = get_hf_file_to_dict(pooling_file_name, model, revision, - token) + pooling_dict = get_hf_file_to_dict(pooling_file_name, model, revision) pooling_type_name = next( (item for item, val in pooling_dict.items() if val is True), None) @@ -368,8 +366,8 @@ def get_pooling_config_name(pooling_name: str) -> Union[str, None]: def get_sentence_transformer_tokenizer_config(model: str, - revision: Optional[str] = 'main', - token: Optional[str] = None): + revision: Optional[str] = 'main' + ): """ Returns the tokenization configuration dictionary for a given Sentence Transformer BERT model. @@ -379,7 +377,6 @@ def get_sentence_transformer_tokenizer_config(model: str, BERT model. - revision (str, optional): The revision of the model to use. Defaults to 'main'. - - token (str): A Hugging Face access token.
Returns: - dict: A dictionary containing the configuration parameters @@ -394,7 +391,7 @@ def get_sentence_transformer_tokenizer_config(model: str, "sentence_xlm-roberta_config.json", "sentence_xlnet_config.json", ]: - encoder_dict = get_hf_file_to_dict(config_name, model, revision, token) + encoder_dict = get_hf_file_to_dict(config_name, model, revision) if encoder_dict: break @@ -474,16 +471,14 @@ def _reduce_config(config: VllmConfig): exc_info=e) -def load_params_config(model: Union[str, Path], - revision: Optional[str], - token: Optional[str] = None, +def load_params_config(model: Union[str, Path], revision: Optional[str], **kwargs) -> PretrainedConfig: # This function loads a params.json config which # should be used when loading models in mistral format config_file_name = "params.json" - config_dict = get_hf_file_to_dict(config_file_name, model, revision, token) + config_dict = get_hf_file_to_dict(config_file_name, model, revision) assert isinstance(config_dict, dict) config_mapping = { diff --git a/vllm/transformers_utils/tokenizer_group/__init__.py b/vllm/transformers_utils/tokenizer_group/__init__.py index 6a114b513f382..c0b3d2585a962 100644 --- a/vllm/transformers_utils/tokenizer_group/__init__.py +++ b/vllm/transformers_utils/tokenizer_group/__init__.py @@ -1,7 +1,7 @@ from typing import Optional, Type -from vllm.config import (ModelConfig, ParallelConfig, SchedulerConfig, - TokenizerPoolConfig) +from vllm.config import (LoRAConfig, ModelConfig, ParallelConfig, + SchedulerConfig, TokenizerPoolConfig) from vllm.executor.ray_utils import ray from .base_tokenizer_group import AnyTokenizer, BaseTokenizerGroup @@ -16,10 +16,11 @@ def init_tokenizer_from_configs(model_config: ModelConfig, scheduler_config: SchedulerConfig, parallel_config: ParallelConfig, - enable_lora: bool): + lora_config: LoRAConfig): init_kwargs = dict(tokenizer_id=model_config.tokenizer, - enable_lora=enable_lora, + enable_lora=bool(lora_config), max_num_seqs=scheduler_config.max_num_seqs, + max_loras=lora_config.max_loras if lora_config else 0, max_input_length=None, tokenizer_mode=model_config.tokenizer_mode, trust_remote_code=model_config.trust_remote_code, diff --git a/vllm/transformers_utils/tokenizer_group/tokenizer_group.py b/vllm/transformers_utils/tokenizer_group/tokenizer_group.py index e516eeabaadef..761b07f34d2f9 100644 --- a/vllm/transformers_utils/tokenizer_group/tokenizer_group.py +++ b/vllm/transformers_utils/tokenizer_group/tokenizer_group.py @@ -21,8 +21,9 @@ def __init__(self, tokenizer_id: str, enable_lora: bool, max_num_seqs: int, self.enable_lora = enable_lora self.max_input_length = max_input_length self.tokenizer = get_tokenizer(self.tokenizer_id, **tokenizer_config) + max_loras = tokenizer_config.get("max_loras", 0) self.lora_tokenizers = LRUCache[AnyTokenizer]( - capacity=max_num_seqs if enable_lora else 0) + capacity=max(max_loras, max_num_seqs) if enable_lora else 0) @classmethod def from_config(cls, tokenizer_pool_config: Optional[TokenizerPoolConfig], diff --git a/vllm/utils.py b/vllm/utils.py index bec876d983701..38c7dea6d2d3d 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -1,5 +1,6 @@ import argparse import asyncio +import concurrent import contextlib import datetime import enum @@ -9,6 +10,7 @@ import inspect import ipaddress import os +import signal import socket import subprocess import sys @@ -18,14 +20,15 @@ import uuid import warnings import weakref -from asyncio import FIRST_COMPLETED, AbstractEventLoop, Future, Task -from collections import defaultdict +from asyncio 
import FIRST_COMPLETED, AbstractEventLoop, Task +from collections import UserDict, defaultdict from collections.abc import Iterable, Mapping +from dataclasses import dataclass, field from functools import lru_cache, partial, wraps -from platform import uname -from typing import (Any, AsyncGenerator, Awaitable, Callable, Dict, Generic, - Hashable, List, Literal, Optional, OrderedDict, Set, Tuple, - Type, TypeVar, Union, overload) +from typing import (TYPE_CHECKING, Any, AsyncGenerator, Awaitable, Callable, + Dict, Generator, Generic, Hashable, List, Literal, + Optional, OrderedDict, Set, Tuple, Type, TypeVar, Union, + overload) from uuid import uuid4 import numpy as np @@ -42,11 +45,14 @@ from vllm.logger import enable_trace_function_call, init_logger from vllm.platforms import current_platform +if TYPE_CHECKING: + from vllm.config import VllmConfig + logger = init_logger(__name__) # Exception strings for non-implemented encoder/decoder scenarios -# Reminder: Please update docs/source/serving/compatibility_matrix.rst +# Reminder: Please update docs/source/usage/compatibility_matrix.rst # If the feature combo become valid STR_NOT_IMPL_ENC_DEC_SWA = \ @@ -165,6 +171,11 @@ class Device(enum.Enum): CPU = enum.auto() +class LayerBlockType(enum.Enum): + attention = "attention" + mamba = "mamba" + + class Counter: def __init__(self, start: int = 0) -> None: @@ -334,24 +345,10 @@ def random_uuid() -> str: return str(uuid.uuid4().hex) -@lru_cache(maxsize=None) -def get_vllm_instance_id() -> str: - """ - If the environment variable VLLM_INSTANCE_ID is set, return it. - Otherwise, return a random UUID. - Instance id represents an instance of the VLLM. All processes in the same - instance should have the same instance id. - """ - return envs.VLLM_INSTANCE_ID or f"vllm-instance-{random_uuid()}" - - -@lru_cache(maxsize=None) -def in_wsl() -> bool: - # Reference: https://github.com/microsoft/WSL/issues/4071 - return "microsoft" in " ".join(uname()).lower() - - -def make_async(func: Callable[P, T]) -> Callable[P, Awaitable[T]]: +def make_async( + func: Callable[P, T], + executor: Optional[concurrent.futures.Executor] = None +) -> Callable[P, Awaitable[T]]: """Take a blocking function, and run it on in an executor thread. This function prevents the blocking function from blocking the @@ -362,7 +359,7 @@ def make_async(func: Callable[P, T]) -> Callable[P, Awaitable[T]]: def _async_wrapper(*args: P.args, **kwargs: P.kwargs) -> asyncio.Future: loop = asyncio.get_event_loop() p_func = partial(func, *args, **kwargs) - return loop.run_in_executor(executor=None, func=p_func) + return loop.run_in_executor(executor=executor, func=p_func) return _async_wrapper @@ -373,72 +370,23 @@ def _next_task(iterator: AsyncGenerator[T, None], return loop.create_task(iterator.__anext__()) # type: ignore[arg-type] -async def iterate_with_cancellation( - iterator: AsyncGenerator[T, None], - is_cancelled: Callable[[], Awaitable[bool]], -) -> AsyncGenerator[T, None]: - """Convert async iterator into one that polls the provided function - at least once per second to check for client cancellation. 
- """ - - loop = asyncio.get_running_loop() - - awaits: List[Future[T]] = [_next_task(iterator, loop)] - next_cancel_check: float = 0 - while True: - done, pending = await asyncio.wait(awaits, timeout=1.5) - - # Check for cancellation at most once per second - time_now = time.time() - if time_now >= next_cancel_check: - if await is_cancelled(): - with contextlib.suppress(BaseException): - awaits[0].cancel() - await iterator.aclose() - raise asyncio.CancelledError("client cancelled") - next_cancel_check = time_now + 1 - - if done: - try: - item = await awaits[0] - awaits[0] = _next_task(iterator, loop) - yield item - except StopAsyncIteration: - # we are done - return - - async def merge_async_iterators( - *iterators: AsyncGenerator[T, None], - is_cancelled: Optional[Callable[[], Awaitable[bool]]] = None, -) -> AsyncGenerator[Tuple[int, T], None]: + *iterators: AsyncGenerator[T, + None], ) -> AsyncGenerator[Tuple[int, T], None]: """Merge multiple asynchronous iterators into a single iterator. This method handle the case where some iterators finish before others. When it yields, it yields a tuple (i, item) where i is the index of the iterator that yields the item. - - It also optionally polls a provided function at least once per second - to check for client cancellation. """ loop = asyncio.get_running_loop() awaits = {_next_task(pair[1], loop): pair for pair in enumerate(iterators)} - timeout = None if is_cancelled is None else 1.5 - next_cancel_check: float = 0 try: while awaits: - done, pending = await asyncio.wait(awaits.keys(), - return_when=FIRST_COMPLETED, - timeout=timeout) - if is_cancelled is not None: - # Check for cancellation at most once per second - time_now = time.time() - if time_now >= next_cancel_check: - if await is_cancelled(): - raise asyncio.CancelledError("client cancelled") - next_cancel_check = time_now + 1 + done, _ = await asyncio.wait(awaits.keys(), + return_when=FIRST_COMPLETED) for d in done: pair = awaits.pop(d) try: @@ -727,25 +675,7 @@ def print_warning_once(msg: str) -> None: @lru_cache(maxsize=None) def is_pin_memory_available() -> bool: - - if in_wsl(): - # Pinning memory in WSL is not supported. - # https://docs.nvidia.com/cuda/wsl-user-guide/index.html#known-limitations-for-linux-cuda-applications - print_warning_once("Using 'pin_memory=False' as WSL is detected. 
" - "This may slow down the performance.") - return False - elif current_platform.is_xpu(): - print_warning_once("Pin memory is not supported on XPU.") - return False - elif current_platform.is_neuron(): - print_warning_once("Pin memory is not supported on Neuron.") - return False - elif current_platform.is_hpu(): - print_warning_once("Pin memory is not supported on HPU.") - return False - elif current_platform.is_cpu() or current_platform.is_openvino(): - return False - return True + return current_platform.is_pin_memory_available() class DeviceMemoryProfiler: @@ -993,7 +923,7 @@ def find_nccl_library() -> str: return so_file -def enable_trace_function_call_for_thread() -> None: +def enable_trace_function_call_for_thread(vllm_config: "VllmConfig") -> None: """Set up function tracing for the current thread, if enabled via the VLLM_TRACE_FUNCTION environment variable """ @@ -1005,7 +935,8 @@ def enable_trace_function_call_for_thread() -> None: filename = (f"VLLM_TRACE_FUNCTION_for_process_{os.getpid()}" f"_thread_{threading.get_ident()}_" f"at_{datetime.datetime.now()}.log").replace(" ", "_") - log_path = os.path.join(tmp_dir, "vllm", get_vllm_instance_id(), + log_path = os.path.join(tmp_dir, "vllm", + f"vllm-instance-{vllm_config.instance_id}", filename) os.makedirs(os.path.dirname(log_path), exist_ok=True) enable_trace_function_call(log_path) @@ -1392,8 +1323,8 @@ def supports_kw( def resolve_mm_processor_kwargs( - init_kwargs: Optional[Dict[str, Any]], - inference_kwargs: Optional[Dict[str, Any]], + init_kwargs: Optional[Mapping[str, object]], + inference_kwargs: Optional[Mapping[str, object]], callable: Callable[..., object], allow_var_kwargs: bool = False, ) -> Dict[str, Any]: @@ -1427,7 +1358,7 @@ def resolve_mm_processor_kwargs( def get_allowed_kwarg_only_overrides( callable: Callable[..., object], - overrides: Optional[Dict[str, Any]], + overrides: Optional[Mapping[str, object]], allow_var_kwargs: bool = False, ) -> Dict[str, Any]: """ @@ -1513,13 +1444,13 @@ def value(self): # Adapted from: https://stackoverflow.com/a/47212782/5082708 -class LazyDict(Mapping, Generic[T]): +class LazyDict(Mapping[str, T], Generic[T]): def __init__(self, factory: Dict[str, Callable[[], T]]): self._factory = factory self._dict: Dict[str, T] = {} - def __getitem__(self, key) -> T: + def __getitem__(self, key: str) -> T: if key not in self._dict: if key not in self._factory: raise KeyError(key) @@ -1536,6 +1467,28 @@ def __len__(self): return len(self._factory) +class ClassRegistry(UserDict[Type[T], _V]): + + def __getitem__(self, key: Type[T]) -> _V: + for cls in key.mro(): + if cls in self.data: + return self.data[cls] + + raise KeyError(key) + + def __contains__(self, key: object) -> bool: + return self.contains(key) + + def contains(self, key: object, *, strict: bool = False) -> bool: + if not isinstance(key, type): + return False + + if strict: + return key in self.data + + return any(cls in self.data for cls in key.mro()) + + def weak_ref_tensor(tensor: torch.Tensor) -> torch.Tensor: """ Create a weak reference to a tensor. @@ -1615,7 +1568,7 @@ def direct_register_custom_op( library object. If you want to bind the operator to a different library, make sure the library object is alive when the operator is used. 
""" - if is_in_doc_build(): + if is_in_doc_build() or not supports_custom_op(): return import torch.library if hasattr(torch.library, "infer_schema"): @@ -1639,3 +1592,147 @@ def resolve_obj_by_qualname(qualname: str) -> Any: module_name, obj_name = qualname.rsplit(".", 1) module = importlib.import_module(module_name) return getattr(module, obj_name) + + +def kill_process_tree(pid: int): + """ + Kills all descendant processes of the given pid by sending SIGKILL. + + Args: + pid (int): Process ID of the parent process + """ + try: + parent = psutil.Process(pid) + except psutil.NoSuchProcess: + return + + # Get all children recursively + children = parent.children(recursive=True) + + # Send SIGKILL to all children first + for child in children: + with contextlib.suppress(ProcessLookupError): + os.kill(child.pid, signal.SIGKILL) + + # Finally kill the parent + with contextlib.suppress(ProcessLookupError): + os.kill(pid, signal.SIGKILL) + + +@dataclass +class MemorySnapshot: + """Memory snapshot.""" + torch_peak_in_bytes: int = 0 + torch_memory_in_bytes: int = 0 + timestamp: float = 0.0 + + def measure(self): + self.torch_peak_in_bytes = torch.cuda.memory_stats( + )["allocated_bytes.all.peak"] + self.torch_memory_in_bytes = torch.cuda.memory_stats( + )["allocated_bytes.all.current"] + self.timestamp = time.time() + + def __sub__(self, other: "MemorySnapshot") -> "MemorySnapshot": + """support a - b""" + return MemorySnapshot( + torch_peak_in_bytes=self.torch_peak_in_bytes - + other.torch_peak_in_bytes, + torch_memory_in_bytes=self.torch_memory_in_bytes - + other.torch_memory_in_bytes, + timestamp=self.timestamp - other.timestamp) + + +@dataclass +class MemoryProfilingResult: + """Memory profiling result. + """ # noqa + baseline_memory_in_bytes: int = 0 + non_kv_cache_memory_in_bytes: int = 0 + torch_peak_increase_in_bytes: int = 0 + non_torch_increase_in_bytes: int = 0 + weights_memory_in_bytes: float = 0 + before_profile: MemorySnapshot = field(default_factory=MemorySnapshot) + after_profile: MemorySnapshot = field(default_factory=MemorySnapshot) + profile_time: float = 0.0 + + +@contextlib.contextmanager +def memory_profiling( + baseline_memory_in_bytes: int, weights_memory_in_bytes: int +) -> Generator[MemoryProfilingResult, None, None]: + """Memory profiling context manager. + baseline_memory_in_bytes: memory used by all the components other than + the current vLLM instance. It contains: memory used by other processes, memory + used by another vLLM instance in the same process, etc. It is usually measured + before the current vLLM instance initialize the device. And we assume it is + constant during the profiling of the current vLLM instance. + weights_memory_in_bytes: memory used by PyTorch when loading the model weights. + Note that, before loading the model weights, we also initialize the device + and distributed environment, which may consume some memory. This part is not + included in the weights_memory_in_bytes because PyTorch does not control it. + + The memory in one GPU can be classified into 3 categories: + 1. memory used by anything other than the current vLLM instance. + 2. memory used by torch in the current vLLM instance. + 3. memory used in the current vLLM instance, but not by torch. + + A quantitive example: + + Before creating the current vLLM instance: + category 1: 1 GiB + category 2: 0 GiB + category 3: 0 GiB + + After creating the current vLLM instance and loading the model, + (i.e. 
diff --git a/vllm/v1/attention/backends/flash_attn.py b/vllm/v1/attention/backends/flash_attn.py index 5f8535eaa303f..026a0292cc339 100644 --- a/vllm/v1/attention/backends/flash_attn.py +++ b/vllm/v1/attention/backends/flash_attn.py @@ -6,8 +6,6 @@ from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl, AttentionMetadata, AttentionType) -from vllm.forward_context import get_forward_context -from vllm.utils import direct_register_custom_op from vllm.vllm_flash_attn import flash_attn_varlen_func @@ -58,6 +56,7 @@ class FlashAttentionMetadata: seq_start_loc: torch.Tensor block_table: torch.Tensor slot_mapping: torch.Tensor + num_input_tokens: int = 0 # Number of tokens including padding. class FlashAttentionImpl(AttentionImpl): @@ -113,13 +112,14 @@ def forward( k_scale: float = 1.0, v_scale: float = 1.0, attn_type: AttentionType = AttentionType.DECODER, + output: Optional[torch.Tensor] = None, ) -> torch.Tensor: """Forward pass with FlashAttention.
Args: - query: shape = [num_tokens, num_heads * head_size] - key: shape = [num_tokens, num_kv_heads * head_size] - value: shape = [num_tokens, num_kv_heads * head_size] + query: shape = [num_tokens, num_heads, head_size] + key: shape = [num_tokens, num_kv_heads, head_size] + value: shape = [num_tokens, num_kv_heads, head_size] kv_cache = [2, num_blocks, block_size, num_kv_heads, head_size] attn_metadata: Metadata for attention. Returns: @@ -135,117 +135,55 @@ def forward( assert k_scale == 1.0 and v_scale == 1.0, ( "key/v_scale is not supported in FlashAttention.") - output = torch.empty_like(query) - torch.ops.vllm.unified_v1_flash_attention( - output, - query, + assert output is not None, "Output tensor must be provided." + + if attn_metadata is None: + # Profiling run. + return output + + # IMPORTANT! + # NOTE(woosuk): With piece-wise CUDA graphs, this method is executed in + # eager-mode PyTorch. Thus, we need to be careful about any CPU overhead + # in this method. For example, `view` and `slice` (or `[:n]`) operations + # are surprisingly slow even in the case they do not invoke any GPU ops. + # Minimize the PyTorch ops in this method as much as possible. + # Whenever making a change in this method, please benchmark the + # performance to make sure it does not introduce any overhead. + + num_actual_tokens = attn_metadata.num_actual_tokens + # Reshape the input keys and values and store them in the cache. + # NOTE(woosuk): Here, key and value are padded while slot_mapping is + # not padded. However, we don't need to do key[:num_actual_tokens] and + # value[:num_actual_tokens] because the reshape_and_cache_flash op uses + # the slot_mapping's shape to determine the number of actual tokens. + key_cache, value_cache = kv_cache.unbind(0) + torch.ops._C_cache_ops.reshape_and_cache_flash( key, value, - self.num_heads, - self.head_size, - self.num_kv_heads, - kv_cache, + key_cache, + value_cache, + attn_metadata.slot_mapping, self.kv_cache_dtype, k_scale, v_scale, - self.scale, - self.sliding_window, - self.alibi_slopes, - self.logits_soft_cap, ) - return output + # Compute attention and update output up to `num_actual_tokens`. + flash_attn_varlen_func( + q=query[:num_actual_tokens], + k=key_cache, + v=value_cache, + out=output[:num_actual_tokens], + cu_seqlens_q=attn_metadata.query_start_loc, + max_seqlen_q=attn_metadata.max_query_len, + cu_seqlens_k=attn_metadata.seq_start_loc, + max_seqlen_k=attn_metadata.max_seq_len, + softmax_scale=self.scale, + causal=True, + alibi_slopes=self.alibi_slopes, + window_size=self.sliding_window, + block_table=attn_metadata.block_table, + softcap=self.logits_soft_cap, + ) -def unified_v1_flash_attention( - output: torch.Tensor, - query: torch.Tensor, - key: torch.Tensor, - value: torch.Tensor, - num_heads: int, - head_size: int, - num_kv_heads: int, - kv_cache: torch.Tensor, - kv_cache_dtype: str, - k_scale: float, - v_scale: float, - softmax_scale: float, - window_size: Optional[List[int]] = None, - alibi_slopes: Optional[torch.Tensor] = None, - logits_soft_cap: Optional[float] = None, -) -> None: - context = get_forward_context() - current_metadata = context.dynamic_forward_context - if current_metadata is None: - # Profiling run. - return - - assert current_metadata is not None - assert isinstance(current_metadata, FlashAttentionMetadata) - attn_metadata: FlashAttentionMetadata = current_metadata - num_actual_tokens = attn_metadata.num_actual_tokens - - # Reshape the query, key, and value tensors. 
- query = query.view(-1, num_heads, head_size) - key = key.view(-1, num_kv_heads, head_size) - value = value.view(-1, num_kv_heads, head_size) - - # Reshape the input keys and values and store them in the cache. - key_cache = kv_cache[0] - value_cache = kv_cache[1] - torch.ops._C_cache_ops.reshape_and_cache_flash( - key[:num_actual_tokens], - value[:num_actual_tokens], - key_cache, - value_cache, - attn_metadata.slot_mapping, - kv_cache_dtype, - k_scale, - v_scale, - ) - - attn_output = flash_attn_varlen_func( - q=query[:num_actual_tokens], - k=key_cache, - v=value_cache, - cu_seqlens_q=attn_metadata.query_start_loc, - max_seqlen_q=attn_metadata.max_query_len, - cu_seqlens_k=attn_metadata.seq_start_loc, - max_seqlen_k=attn_metadata.max_seq_len, - softmax_scale=softmax_scale, - causal=True, - alibi_slopes=alibi_slopes, - window_size=window_size, - block_table=attn_metadata.block_table, - softcap=logits_soft_cap, - ) - attn_output = attn_output.view(num_actual_tokens, -1) - # TODO(woosuk): Optimize this. - output[:num_actual_tokens].copy_(attn_output) - - -def unified_v1_flash_attention_fake( - output: torch.Tensor, - query: torch.Tensor, - key: torch.Tensor, - value: torch.Tensor, - num_heads: int, - head_size: int, - num_kv_heads: int, - kv_cache: torch.Tensor, - kv_cache_dtype: str, - k_scale: float, - v_scale: float, - softmax_scale: float, - window_size: Optional[List[int]] = None, - alibi_slopes: Optional[torch.Tensor] = None, - logits_soft_cap: Optional[float] = None, -) -> None: - return - - -direct_register_custom_op( - op_name="unified_v1_flash_attention", - op_func=unified_v1_flash_attention, - mutates_args=["kv_cache", "output"], - fake_impl=unified_v1_flash_attention_fake, -) + return output diff --git a/vllm/v1/core/kv_cache_manager.py b/vllm/v1/core/kv_cache_manager.py index 8eb3fb976eb87..61a3f5fd6d841 100644 --- a/vllm/v1/core/kv_cache_manager.py +++ b/vllm/v1/core/kv_cache_manager.py @@ -1,10 +1,12 @@ from collections import defaultdict -from typing import Dict, List, Optional +from typing import Dict, Iterable, List, Optional from vllm.logger import init_logger from vllm.utils import cdiv from vllm.v1.core.kv_cache_utils import (BlockHashType, FreeKVCacheBlockQueue, - KVCacheBlock, hash_block_tokens, + KVCacheBlock, + generate_block_hash_extra_keys, + hash_block_tokens, hash_request_tokens) from vllm.v1.request import Request @@ -17,12 +19,15 @@ def __init__( self, block_size: int, num_gpu_blocks: int, + max_model_len: int, sliding_window: Optional[int] = None, enable_caching: bool = True, num_preallocate_tokens: int = 64, ) -> None: self.block_size = block_size self.num_gpu_blocks = num_gpu_blocks + self.max_model_len = max_model_len + self.max_num_blocks_per_req = cdiv(max_model_len, block_size) self.sliding_window = sliding_window self.enable_caching = enable_caching # NOTE(woosuk): To avoid frequent block allocation, we preallocate some @@ -80,10 +85,12 @@ def get_computed_blocks(self, request: Request) -> List[KVCacheBlock]: computed_blocks = [] - # TODO(rickyx): potentially we could cache this so we don't have to - # recompute it every time. - block_hashes = hash_request_tokens(self.block_size, - request.all_token_ids) + # The block hashes for the request may already be computed + # if the request was preempted and resumed. + if not request.kv_block_hashes: + request.set_kv_block_hashes( + hash_request_tokens(self.block_size, request)) + block_hashes = request.kv_block_hashes for block_hash in block_hashes: # block_hashes is a chain of block hashes. 
If a block hash is not @@ -132,7 +139,14 @@ def append_slots( num_new_blocks = min( num_new_blocks + self.num_preallocate_blocks, self.free_block_queue.num_free_blocks, + # Should not exceed the maximum number of blocks per request. + # This is especially because the block table has the shape + # [..., max_num_blocks_per_req]. + # TODO(woosuk): Check and reject requests if + # num_prompt_tokens + max_tokens > max_model_len. + self.max_num_blocks_per_req - len(req_blocks), ) + assert num_new_blocks > 0 new_blocks = self._get_new_blocks(num_new_blocks) req_blocks.extend(new_blocks) @@ -154,13 +168,14 @@ def append_slots( new_full_blocks = req_blocks[ num_computed_full_blocks:num_full_blocks_after_append] - self._cache_full_blocks( - request=request, - blk_start_idx=num_computed_full_blocks, - full_blocks=new_full_blocks, - prev_block=req_blocks[num_computed_full_blocks - 1] - if num_computed_full_blocks >= 1 else None, - ) + if new_full_blocks: + self._cache_full_blocks( + request=request, + blk_start_idx=num_computed_full_blocks, + full_blocks=new_full_blocks, + prev_block=req_blocks[num_computed_full_blocks - 1] + if num_computed_full_blocks >= 1 else None, + ) return new_blocks @@ -212,7 +227,14 @@ def allocate_slots( num_required_blocks + self.num_preallocate_blocks, self.free_block_queue.num_free_blocks - num_evictable_computed_blocks, + # Should not exceed the maximum number of blocks per request. + # This is especially because the block table has the shape + # [..., max_num_blocks_per_req]. + # TODO(woosuk): Check and reject requests if + # num_prompt_tokens + max_tokens > max_model_len. + self.max_num_blocks_per_req - len(computed_blocks), ) + assert num_new_blocks > 0 # Concatenate the computed block IDs and the new block IDs. new_blocks = self._get_new_blocks(num_new_blocks) @@ -224,14 +246,16 @@ def allocate_slots( num_computed_tokens = len(computed_blocks) * self.block_size num_full_blocks = (num_computed_tokens + num_tokens) // self.block_size - self._cache_full_blocks( - request=request, - blk_start_idx=len(computed_blocks), - # The new full blocks are the full blocks that are not computed. - full_blocks=self.req_to_blocks[request.request_id] - [len(computed_blocks):num_full_blocks], - prev_block=computed_blocks[-1] if computed_blocks else None, - ) + new_full_blocks = self.req_to_blocks[ + request.request_id][len(computed_blocks):num_full_blocks] + if new_full_blocks: + self._cache_full_blocks( + request=request, + blk_start_idx=len(computed_blocks), + # The new full blocks are the full blocks that are not computed. + full_blocks=new_full_blocks, + prev_block=computed_blocks[-1] if computed_blocks else None, + ) return new_blocks @@ -245,12 +269,13 @@ def free(self, request: Request) -> None: """ # Default to [] in case a request is freed (aborted) before alloc. blocks = self.req_to_blocks.pop(request.request_id, []) + ordered_blocks: Iterable[KVCacheBlock] = blocks if self.enable_caching: # Free blocks in reverse order so that the tail blocks are # freed first. - blocks = reversed(blocks) + ordered_blocks = reversed(blocks) - for block in blocks: + for block in ordered_blocks: block.decr_ref() if block.ref_cnt == 0: self.free_block_queue.append(block) @@ -357,26 +382,50 @@ def _cache_full_blocks( full_blocks: The list of blocks to update hash metadata. prev_block: The previous block in the chain. """ + num_cached_block_hashes = len(request.kv_block_hashes) + # Update the new blocks with the block hashes through the chain. 
- prev_block_hash = (prev_block.block_hash - if prev_block is not None else None) + prev_block_hash_value = None + if prev_block is not None: + # Previous block must have a block hash because it must be + # a full, cached block. + assert prev_block.block_hash is not None + prev_block_hash_value = prev_block.block_hash.hash_value + for i, blk in enumerate(full_blocks): blk_idx = blk_start_idx + i - block_tokens = request.all_token_ids[blk_idx * - self.block_size:(blk_idx + - 1) * - self.block_size] - assert len(block_tokens) == self.block_size, ( - f"Expected {self.block_size} tokens, got {len(block_tokens)} " - f"at {blk_idx}th block for request " - f"{request.request_id}({request})") - - # Compute the hash of the current block. - block_hash = hash_block_tokens(prev_block_hash, - tuple(block_tokens)) + if blk_idx < num_cached_block_hashes: + # The block hash may already be computed in + # "get_computed_blocks" if the tokens are not generated by + # this request (either the prompt tokens or the previously + # generated tokens with preemption). In this case we simply + # reuse the block hash. + block_hash = request.kv_block_hashes[blk_idx] + else: + # Otherwise compute the block hash and cache it in the request + # in case it will be preempted in the future. + start_token_idx = blk_idx * self.block_size + end_token_idx = (blk_idx + 1) * self.block_size + block_tokens = request.all_token_ids[ + start_token_idx:end_token_idx] + assert len(block_tokens) == self.block_size, ( + f"Expected {self.block_size} tokens, got " + f"{len(block_tokens)} at {blk_idx}th block for request " + f"{request.request_id}({request})") + + # Generate extra keys for multi-modal inputs. Note that since + # we reach to this branch only when the block is completed with + # generated tokens, we only need to consider the last mm input. + extra_keys, _ = generate_block_hash_extra_keys( + request, start_token_idx, end_token_idx, -1) + + # Compute the hash of the current block. + block_hash = hash_block_tokens(prev_block_hash_value, + block_tokens, extra_keys) + request.append_kv_block_hashes(block_hash) # Update and added the full block to the cache. blk.block_hash = block_hash self.cached_block_hash_to_block[block_hash][blk.block_id] = blk - prev_block_hash = block_hash + prev_block_hash_value = block_hash.hash_value diff --git a/vllm/v1/core/kv_cache_utils.py b/vllm/v1/core/kv_cache_utils.py index fb666c364bfb2..d80ea128c7749 100644 --- a/vllm/v1/core/kv_cache_utils.py +++ b/vllm/v1/core/kv_cache_utils.py @@ -1,12 +1,25 @@ """KV-Cache Utilities.""" +from collections.abc import Sequence from dataclasses import dataclass -from typing import List, Optional, Tuple +from typing import Any, List, NamedTuple, Optional, Tuple from vllm.logger import init_logger +from vllm.v1.request import Request logger = init_logger(__name__) -BlockHashType = Tuple[int, Tuple[int]] + +class BlockHashType(NamedTuple): + """Hash value of a block (int), the token IDs in the block, and extra keys. + The reason we keep a tuple of token IDs and extra keys is to make sure + no hash collision happens when the hash value is the same. + """ + # Hash value of the block in an integer. + hash_value: int + # Token IDs in the block. + token_ids: Tuple[int, ...] + # Extra keys for the block. + extra_keys: Optional[Any] = None @dataclass @@ -72,8 +85,8 @@ def __init__(self, blocks: List[KVCacheBlock]) -> None: self.num_free_blocks = len(blocks) # Initialize the doubly linked list of free blocks. 
- self.free_list_head = blocks[0] - self.free_list_tail = blocks[-1] + self.free_list_head: Optional[KVCacheBlock] = blocks[0] + self.free_list_tail: Optional[KVCacheBlock] = blocks[-1] for i in range(self.num_free_blocks): if i > 0: blocks[i].prev_free_block = blocks[i - 1] @@ -151,8 +164,80 @@ def get_all_free_blocks(self) -> List[KVCacheBlock]: return ret -def hash_block_tokens(parent_block_hash: Optional[int], - curr_block_token_ids: Tuple[int]) -> BlockHashType: +def generate_block_hash_extra_keys( + request: Request, start_token_idx: int, end_token_idx: int, + start_mm_idx: int) -> Tuple[Optional[Tuple[Any, ...]], int]: + """Generate extra keys for the block hash. The extra keys can come from + the multi-modal inputs and request specific metadata (e.g., LoRA ID). + For multi-modal inputs, the extra keys are (mm_hash, start_offset) that + indicate a mm input contained in the block and its starting offset in + the block tokens. + + Args: + request: The request object. + start_token_idx: The start token index of the block. + end_token_idx: The end token index of the block. + start_mm_idx: The start multi-modal index of the block. + + Returns: + A tuple of extra keys and the next multi-modal index. + """ + + mm_positions, mm_hashes = request.mm_positions, request.mm_hashes + if not mm_positions: + return None, start_mm_idx + + if mm_positions and len(mm_positions) != len(mm_hashes): + raise ValueError( + "The number of multi-modal positions and hashes must match. This " + "is likely because you do not enable MM preprocessor hashing. " + "Please set mm_cache_preprocessor=True.") + + # Note that we assume mm_positions is sorted by offset. + # We do not need to check all mm inputs if the start token index is out of + # range. This usually happens in the late prefill phase and decoding phase. + if mm_positions[-1]["offset"] + mm_positions[-1][ + "length"] < start_token_idx: + return None, start_mm_idx + + # Support start_mm_idx == -1 to indicate the last mm input. + if start_mm_idx < 0: + assert -start_mm_idx <= len(mm_positions) + start_mm_idx = len(mm_positions) + start_mm_idx + + extra_keys = [] + curr_mm_idx = start_mm_idx + while mm_positions and curr_mm_idx < len(mm_positions): + assert mm_hashes[curr_mm_idx] is not None + offset = mm_positions[curr_mm_idx]["offset"] + length = mm_positions[curr_mm_idx]["length"] + if end_token_idx > offset: + if start_token_idx > offset + length: + # This block has passed the current mm input. + curr_mm_idx += 1 + continue + + # The block contains the current mm input. + mm_start = max(0, start_token_idx - offset) + extra_keys.append((mm_hashes[curr_mm_idx], mm_start)) + if end_token_idx >= offset + length: + # If this block contains the end of the current mm input, + # move to the next mm input as this block may also contain + # the next mm input. + curr_mm_idx += 1 + else: + # Otherwise this block is done with mm inputs. + break + else: + # This block has not reached the current mm input. + break + return tuple(extra_keys), curr_mm_idx + + +def hash_block_tokens( + parent_block_hash: Optional[int], + curr_block_token_ids: Sequence[int], + extra_keys: Optional[Tuple[Any, ...]] = None) -> BlockHashType: """Computes a hash value corresponding to the contents of a block and the contents of the preceding block(s). The hash value is used for prefix caching. We use LRU cache for this function to avoid recomputing @@ -164,38 +249,57 @@ def hash_block_tokens(parent_block_hash: Optional[int], Args: parent_block_hash: The hash of the parent block. 
None if this is the first block. - curr_block_token_ids: A tuple of token ids in the current + curr_block_token_ids: A list of token ids in the current block. The current block is assumed to be full. + extra_keys: Extra keys for the block. Returns: The hash value of the block and the token ids in the block. The entire tuple is used as the hash key of the block. """ - return (hash( - (parent_block_hash, *curr_block_token_ids)), curr_block_token_ids) + return BlockHashType(hash((parent_block_hash, *curr_block_token_ids)), + tuple(curr_block_token_ids), extra_keys) def hash_request_tokens(block_size: int, - token_ids: List[int]) -> List[BlockHashType]: + request: Request) -> List[BlockHashType]: """Computes hash values of a chain of blocks given a sequence of token IDs. The hash value is used for prefix caching. Args: block_size: The size of each block. - token_ids: A sequence of token ids in the request. + request: The request object. Returns: The list of computed hash values. """ + token_ids = request.all_token_ids + mm_positions, mm_hashes = request.mm_positions, request.mm_hashes + if mm_positions and len(mm_positions) != len(mm_hashes): + raise ValueError( + "The number of multi-modal positions and hashes must match.") + + # TODO: Extend this to support other features such as LoRA. + need_extra_keys = bool(mm_positions) + extra_keys = None + curr_mm_idx = 0 + ret = [] - parent_block_hash = None + parent_block_hash_value = None for start in range(0, len(token_ids), block_size): end = start + block_size - block_token_ids = tuple(token_ids[start:end]) + block_token_ids = token_ids[start:end] # Do not hash the block if it is not full. if len(block_token_ids) < block_size: break - block_hash = hash_block_tokens(parent_block_hash, block_token_ids) + + # Add extra keys if the block is a multi-modal block. + if need_extra_keys: + extra_keys, curr_mm_idx = generate_block_hash_extra_keys( + request, start, end, curr_mm_idx) + + block_hash = hash_block_tokens(parent_block_hash_value, + block_token_ids, extra_keys) ret.append(block_hash) - parent_block_hash = block_hash + parent_block_hash_value = block_hash.hash_value return ret diff --git a/vllm/v1/core/scheduler.py b/vllm/v1/core/scheduler.py index ba50a9786d805..08e7c0fd4dc9b 100644 --- a/vllm/v1/core/scheduler.py +++ b/vllm/v1/core/scheduler.py @@ -5,6 +5,8 @@ from vllm.config import CacheConfig, LoRAConfig, SchedulerConfig from vllm.logger import init_logger +from vllm.multimodal import MultiModalKwargs +from vllm.multimodal.base import PlaceholderRange from vllm.sampling_params import SamplingParams from vllm.v1.core.encoder_cache_manager import EncoderCacheManager from vllm.v1.core.kv_cache_manager import KVCacheManager @@ -33,22 +35,23 @@ def __init__( # TODO: Support LoRA. assert lora_config is None, "V1 does not support LoRA yet." + # Scheduling constraints. + self.max_num_running_reqs = self.scheduler_config.max_num_seqs + self.max_num_scheduled_tokens = \ + self.scheduler_config.max_num_batched_tokens + self.max_model_len = self.scheduler_config.max_model_len + num_gpu_blocks = cache_config.num_gpu_blocks assert isinstance(num_gpu_blocks, int) and num_gpu_blocks > 0 - # Create the block space manager. + # Create the KV cache manager. 
self.kv_cache_manager = KVCacheManager(
             block_size=self.cache_config.block_size,
             num_gpu_blocks=num_gpu_blocks,
+            max_model_len=self.max_model_len,
             sliding_window=self.cache_config.sliding_window,
             enable_caching=self.cache_config.enable_prefix_caching)
         self.block_size = self.cache_config.block_size
 
-        # Scheduling constraints.
-        self.max_num_running_reqs = self.scheduler_config.max_num_seqs
-        self.max_num_scheduled_tokens = \
-            self.scheduler_config.max_num_batched_tokens
-        self.max_model_len = self.scheduler_config.max_model_len
-
         # req_id -> Request
         self.requests: Dict[str, Request] = {}
         # Priority queues for requests.
@@ -70,14 +73,13 @@ def __init__(
         # NOTE(woosuk): Here, "encoder" includes the vision encoder (and
         # projector if needed). Currently, we assume that the encoder also
         # has the Transformer architecture (e.g., ViT).
-        # FIXME(woosuk): Below are placeholder values. We need to calculate the
-        # actual values from the configurations.
-        self.max_num_encoder_input_tokens = 2048
+        self.max_num_encoder_input_tokens = self.scheduler_config.max_num_encoder_input_tokens  #noqa: E501
         # NOTE(woosuk): For the models without encoder (e.g., text-only models),
         # the encoder cache will not be initialized and used, regardless of
         # the cache size. This is because the memory space for the encoder cache
         # is preallocated in the profiling run.
-        self.encoder_cache_manager = EncoderCacheManager(cache_size=2048)
+        self.encoder_cache_manager = EncoderCacheManager(
+            cache_size=self.scheduler_config.encoder_cache_size)
 
     def schedule(self) -> "SchedulerOutput":
         # NOTE(woosuk) on the scheduling algorithm:
@@ -149,6 +151,7 @@ def schedule(self) -> "SchedulerOutput":
                     break
             if not can_schedule:
                 break
+            assert new_blocks is not None
 
             # Schedule the request.
             scheduled_running_reqs.append(request)
@@ -196,9 +199,13 @@
                 if num_new_tokens == 0:
                     # This happens when prompt length is divisible by the block
                     # size and all blocks are cached. Now we force to recompute
-                    # the last token.
-                    num_computed_tokens -= 1
-                    num_new_tokens = 1
+                    # the last block. Note that we have to re-compute an entire
+                    # block because allocate_slots() assumes num_computed_tokens
+                    # is always a multiple of the block size. This limitation
+                    # can potentially be removed in the future to slightly
+                    # improve the performance.
+                    num_computed_tokens -= self.block_size
+                    num_new_tokens = self.block_size
                     computed_blocks.pop()
                 num_new_tokens = min(num_new_tokens, token_budget)
                 assert num_new_tokens > 0
@@ -382,7 +389,7 @@ def update_from_output(
         model_runner_output: "ModelRunnerOutput",
     ) -> List[EngineCoreOutput]:
         # NOTE(woosuk): This method doesn't consider speculative decoding.
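The block-granular recompute in schedule() above is easiest to see with concrete numbers; the values below are assumed for illustration and are not taken from this patch:

block_size = 16
num_prompt_tokens = 32
num_computed_tokens = 32  # the whole prompt hit the prefix cache
num_new_tokens = num_prompt_tokens - num_computed_tokens  # == 0

if num_new_tokens == 0:
    # Roll back a full block rather than a single token, so that
    # num_computed_tokens stays a multiple of block_size as
    # allocate_slots() assumes.
    num_computed_tokens -= block_size  # 32 -> 16
    num_new_tokens = block_size        # 0 -> 16

assert num_computed_tokens % block_size == 0
assert num_new_tokens > 0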
- sampled_token_ids = model_runner_output.sampled_token_ids_cpu.tolist() + sampled_token_ids = model_runner_output.sampled_token_ids num_scheduled_tokens = scheduler_output.num_scheduled_tokens new_running: List[Request] = [] engine_core_outputs: List[EngineCoreOutput] = [] @@ -509,6 +516,7 @@ class NewRequestData: prompt_token_ids: List[int] prompt: Optional[str] mm_inputs: List["MultiModalKwargs"] + mm_hashes: List[str] mm_positions: List["PlaceholderRange"] sampling_params: SamplingParams block_ids: List[int] @@ -526,6 +534,7 @@ def from_request( prompt_token_ids=request.prompt_token_ids, prompt=request.prompt, mm_inputs=request.mm_inputs, + mm_hashes=request.mm_hashes, mm_positions=request.mm_positions, sampling_params=request.sampling_params, block_ids=block_ids, diff --git a/vllm/v1/engine/__init__.py b/vllm/v1/engine/__init__.py index 967124fd850ea..cc0c7ea23469a 100644 --- a/vllm/v1/engine/__init__.py +++ b/vllm/v1/engine/__init__.py @@ -1,11 +1,11 @@ import enum from dataclasses import dataclass -from typing import Any, Dict, List, Optional, Union +from typing import List, Optional, Union import msgspec from vllm.lora.request import LoRARequest -from vllm.multimodal import MultiModalDataDict, MultiModalPlaceholderDict +from vllm.multimodal import MultiModalKwargs, MultiModalPlaceholderDict from vllm.sampling_params import RequestOutputKind, SamplingParams @@ -35,19 +35,20 @@ class EngineCoreRequest: # always be tokenized? prompt: Optional[str] prompt_token_ids: List[int] - mm_data: Optional[MultiModalDataDict] + mm_inputs: Optional[List[Optional[MultiModalKwargs]]] + mm_hashes: Optional[List[str]] mm_placeholders: Optional[MultiModalPlaceholderDict] - mm_processor_kwargs: Optional[Dict[str, Any]] sampling_params: SamplingParams eos_token_id: Optional[int] arrival_time: float lora_request: Optional[LoRARequest] -class EngineCoreOutput(msgspec.Struct, - array_like=True, - omit_defaults=True, - gc=False): +class EngineCoreOutput( + msgspec.Struct, + array_like=True, # type: ignore[call-arg] + omit_defaults=True, # type: ignore[call-arg] + gc=False): # type: ignore[call-arg] request_id: str new_token_ids: List[int] @@ -56,10 +57,11 @@ class EngineCoreOutput(msgspec.Struct, stop_reason: Union[int, str, None] = None -class EngineCoreOutputs(msgspec.Struct, - array_like=True, - omit_defaults=True, - gc=False): +class EngineCoreOutputs( + msgspec.Struct, + array_like=True, # type: ignore[call-arg] + omit_defaults=True, # type: ignore[call-arg] + gc=False): # type: ignore[call-arg] #NOTE(Nick): We could consider ways to make this more compact, # e.g. 
columnwise layout and using an int enum for finish/stop reason @@ -81,3 +83,6 @@ class EngineCoreRequestType(enum.Enum): ADD = b'\x00' ABORT = b'\x01' PROFILE = b'\x02' + + +EngineCoreRequestUnion = Union[EngineCoreRequest, EngineCoreProfile, List[str]] diff --git a/vllm/v1/engine/async_llm.py b/vllm/v1/engine/async_llm.py index a17c8eac4b77c..41fb4b25d45bb 100644 --- a/vllm/v1/engine/async_llm.py +++ b/vllm/v1/engine/async_llm.py @@ -9,7 +9,7 @@ from vllm.inputs.preprocess import InputPreprocessor from vllm.logger import init_logger from vllm.lora.request import LoRARequest -from vllm.outputs import EmbeddingRequestOutput, RequestOutput +from vllm.outputs import PoolingRequestOutput, RequestOutput from vllm.pooling_params import PoolingParams from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sampling_params import SamplingParams @@ -20,7 +20,7 @@ from vllm.v1.engine.core_client import EngineCoreClient from vllm.v1.engine.detokenizer import Detokenizer from vllm.v1.engine.processor import Processor -from vllm.v1.executor.gpu_executor import GPUExecutor +from vllm.v1.executor.abstract import Executor logger = init_logger(__name__) @@ -30,7 +30,7 @@ class AsyncLLM(EngineClient): def __init__( self, vllm_config: VllmConfig, - executor_class: Type[GPUExecutor], + executor_class: Type[Executor], log_stats: bool, usage_context: UsageContext = UsageContext.ENGINE_CONTEXT, stat_loggers: Optional[Dict[str, StatLoggerBase]] = None, @@ -51,7 +51,7 @@ def __init__( model_config=vllm_config.model_config, scheduler_config=vllm_config.scheduler_config, parallel_config=vllm_config.parallel_config, - enable_lora=bool(vllm_config.lora_config)) + lora_config=vllm_config.lora_config) self.tokenizer.ping() # Request streams (map of request_id -> AsyncStream). @@ -60,12 +60,21 @@ def __init__( self.client_aborted_requests: List[str] = [] # Processor (converts Inputs --> EngineCoreRequests). - self.processor = Processor(vllm_config.model_config, - vllm_config.lora_config, self.tokenizer, - input_registry) + self.processor = Processor( + model_config=vllm_config.model_config, + cache_config=vllm_config.cache_config, + lora_config=vllm_config.lora_config, + tokenizer=self.tokenizer, + input_registry=input_registry, + ) # Detokenizer (converts EngineCoreOutputs --> RequestOutput). - self.detokenizer = Detokenizer(vllm_config.model_config.tokenizer) + self.detokenizer = Detokenizer( + tokenizer_name=vllm_config.model_config.tokenizer, + tokenizer_mode=vllm_config.model_config.tokenizer_mode, + trust_remote_code=vllm_config.model_config.trust_remote_code, + revision=vllm_config.model_config.tokenizer_revision, + ) # EngineCore (starts the engine in background process). 
self.engine_core = EngineCoreClient.make_client( @@ -76,7 +85,7 @@ def __init__( asyncio_mode=True, ) - self.output_handler = None + self.output_handler: Optional[asyncio.Task] = None def __del__(self): self.shutdown() @@ -114,14 +123,25 @@ def from_engine_args( def shutdown(self): """Shutdown, cleaning up the background proc and IPC.""" - self.engine_core.shutdown() + if engine_core := getattr(self, "engine_core", None): + engine_core.shutdown() if handler := getattr(self, "output_handler", None): handler.cancel() @classmethod - def _get_executor_cls(cls, vllm_config: VllmConfig): - return GPUExecutor + def _get_executor_cls(cls, vllm_config: VllmConfig) -> Type[Executor]: + executor_class: Type[Executor] + distributed_executor_backend = ( + vllm_config.parallel_config.distributed_executor_backend) + if distributed_executor_backend == "mp": + from vllm.v1.executor.multiproc_executor import MultiprocExecutor + executor_class = MultiprocExecutor + else: + assert (distributed_executor_backend is None) + from vllm.v1.executor.uniproc_executor import UniprocExecutor + executor_class = UniprocExecutor + return executor_class async def add_request( self, @@ -133,11 +153,11 @@ async def add_request( trace_headers: Optional[Mapping[str, str]] = None, prompt_adapter_request: Optional[PromptAdapterRequest] = None, priority: int = 0, - ) -> AsyncGenerator[Union[RequestOutput, EmbeddingRequestOutput], None]: + ) -> AsyncGenerator[Union[RequestOutput, PoolingRequestOutput], None]: """Add new request to the AsyncLLM.""" if self.detokenizer.is_request_active(request_id): - raise KeyError(f"Request {request_id} already exists.") + raise ValueError(f"Request {request_id} already exists.") # 1) Create a new AsyncStream for the request. stream = self._add_request_to_streams(request_id) @@ -346,10 +366,10 @@ async def check_health(self) -> None: logger.debug("Called check_health.") async def start_profile(self) -> None: - await self.engine_core.profile(True) + await self.engine_core.profile_async(True) async def stop_profile(self) -> None: - await self.engine_core.profile(False) + await self.engine_core.profile_async(False) @property def is_running(self) -> bool: @@ -365,7 +385,7 @@ def errored(self) -> bool: @property def dead_error(self) -> BaseException: - return Exception + return Exception() # TODO: implement # Retain V0 name for backwards compatibility. 
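The shutdown() above uses a defensive idiom that recurs throughout this patch: __del__ can run on a partially constructed object, so attributes are read with getattr defaults instead of being accessed directly. A minimal standalone sketch of the pattern, with a hypothetical Resource class:

class Resource:

    def __init__(self, fail: bool = False):
        if fail:
            raise RuntimeError("failed before self.handle was set")
        self.handle = object()

    def shutdown(self):
        # Safe even when __init__ raised before self.handle existed.
        if handle := getattr(self, "handle", None):
            # ... release `handle` here ...
            self.handle = None

    def __del__(self):
        self.shutdown()

try:
    Resource(fail=True)  # __del__ still fires; shutdown() must not raise
except RuntimeError:
    pass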
diff --git a/vllm/v1/engine/async_stream.py b/vllm/v1/engine/async_stream.py index 3e6c759ad5ebd..35449238c3259 100644 --- a/vllm/v1/engine/async_stream.py +++ b/vllm/v1/engine/async_stream.py @@ -1,11 +1,11 @@ import asyncio from typing import Any, AsyncGenerator, Callable, Optional, Type, Union -from vllm.outputs import EmbeddingRequestOutput, RequestOutput +from vllm.outputs import PoolingRequestOutput, RequestOutput class AsyncStream: - """A stream of RequestOutputs or EmbeddingRequestOutputs for a request + """A stream of RequestOutputs or PoolingRequestOutputs for a request that can be iterated over asynchronously via an async generator.""" STOP_ITERATION = Exception() # Sentinel @@ -16,7 +16,7 @@ def __init__(self, request_id: str, cancel: Callable[[str], None]) -> None: self._queue: asyncio.Queue = asyncio.Queue() self._finished = False - def put(self, item: Union[RequestOutput, EmbeddingRequestOutput, + def put(self, item: Union[RequestOutput, PoolingRequestOutput, Exception]) -> None: if not self._finished: self._queue.put_nowait(item) @@ -32,7 +32,7 @@ def finish( async def generator( self - ) -> AsyncGenerator[Union[RequestOutput, EmbeddingRequestOutput], None]: + ) -> AsyncGenerator[Union[RequestOutput, PoolingRequestOutput], None]: finished = False try: while True: diff --git a/vllm/v1/engine/core.py b/vllm/v1/engine/core.py index 34f99dd30ef2e..497d5db5b4c99 100644 --- a/vllm/v1/engine/core.py +++ b/vllm/v1/engine/core.py @@ -1,28 +1,31 @@ -import multiprocessing import pickle import queue +import signal import threading import time -from contextlib import contextmanager +from dataclasses import dataclass from multiprocessing.process import BaseProcess -from multiprocessing.sharedctypes import Synchronized -from typing import Any, Iterator, List, Tuple, Type, Union +from typing import List, Tuple, Type import zmq import zmq.asyncio from msgspec import msgpack from vllm.config import CacheConfig, VllmConfig +from vllm.executor.multiproc_worker_utils import get_mp_context from vllm.logger import init_logger +from vllm.transformers_utils.config import ( + maybe_register_config_serialize_by_value) from vllm.usage.usage_lib import UsageContext from vllm.v1.core.scheduler import Scheduler from vllm.v1.engine import (EngineCoreOutput, EngineCoreOutputs, EngineCoreProfile, EngineCoreRequest, - EngineCoreRequestType) -from vllm.v1.engine.mm_input_mapper import MMInputMapper -from vllm.v1.executor.gpu_executor import GPUExecutor + EngineCoreRequestType, EngineCoreRequestUnion) +from vllm.v1.engine.mm_input_mapper import MMInputMapperServer +from vllm.v1.executor.abstract import Executor from vllm.v1.request import Request, RequestStatus from vllm.v1.serial_utils import PickleEncoder +from vllm.v1.utils import make_zmq_socket from vllm.version import __version__ as VLLM_VERSION logger = init_logger(__name__) @@ -38,10 +41,10 @@ class EngineCore: def __init__( self, vllm_config: VllmConfig, - executor_class: Type[GPUExecutor], + executor_class: Type[Executor], usage_context: UsageContext, ): - assert vllm_config.model_config.task != "embedding" + assert vllm_config.model_config.runner_type != "pooling" logger.info("Initializing an LLM engine (v%s) with config: %s", VLLM_VERSION, vllm_config) @@ -55,9 +58,6 @@ def __init__( vllm_config.cache_config.num_gpu_blocks = num_gpu_blocks vllm_config.cache_config.num_cpu_blocks = num_cpu_blocks - # Set up multimodal input mapper (e.g., convert PIL images to tensors). 
- self.mm_input_mapper = MMInputMapper(vllm_config.model_config) - # Setup scheduler. self.scheduler = Scheduler(vllm_config.scheduler_config, vllm_config.cache_config, @@ -65,8 +65,12 @@ def __init__( self._last_logging_time = time.time() + self.mm_input_mapper_server = MMInputMapperServer( + vllm_config.model_config) + def _initialize_kv_caches(self, cache_config: CacheConfig) -> Tuple[int, int]: + start = time.time() num_gpu_blocks, _ = self.model_executor.determine_num_available_blocks( ) @@ -79,19 +83,27 @@ def _initialize_kv_caches(self, num_gpu_blocks = num_gpu_blocks_override num_cpu_blocks = 0 - self.model_executor.initialize_cache(num_gpu_blocks) + self.model_executor.initialize(num_gpu_blocks) + elapsed = time.time() - start + logger.info(("init engine (profile, create kv cache, " + "warmup model) took %.2f seconds"), elapsed) return num_gpu_blocks, num_cpu_blocks def add_request(self, request: EngineCoreRequest): """Add request to the scheduler.""" + if request.mm_hashes is not None: + # Here, if hash exists for an image, then it will be fetched + # from the cache, else it will be added to the cache. + # Note that the cache here is mirrored with the client side of the + # MM mapper, so anything that has a hash must have a HIT cache + # entry here as well. + assert request.mm_inputs is not None + request.mm_inputs = self.mm_input_mapper_server.process_inputs( + request.mm_inputs, request.mm_hashes) + req = Request.from_engine_core_request(request) - # FIXME(woosuk): The input mapping (e.g., PIL images to tensors) may - # take 10-50 ms, which can cause a spike in the latency. We should - # consider moving this to a separate thread. - if req.mm_data: - req.mm_inputs = self.mm_input_mapper.process_inputs( - req.mm_data, req.mm_processor_kwargs) + self.scheduler.add_request(req) def abort_requests(self, request_ids: List[str]): @@ -115,8 +127,19 @@ def step(self) -> List[EngineCoreOutput]: scheduler_output, output) return engine_core_outputs - def profile(self, is_start=True): - self.model_executor.worker.profile(is_start) + def shutdown(self): + self.model_executor.shutdown() + + def profile(self, is_start: bool = True): + self.model_executor.profile(is_start) + + +@dataclass +class EngineCoreProcHandle: + proc: BaseProcess + ready_path: str + input_path: str + output_path: str class EngineCoreProc(EngineCore): @@ -127,25 +150,21 @@ class EngineCoreProc(EngineCore): def __init__( self, vllm_config: VllmConfig, - executor_class: Type[GPUExecutor], + executor_class: Type[Executor], usage_context: UsageContext, input_path: str, output_path: str, ready_path: str, - should_shutdown: Synchronized, ): super().__init__(vllm_config, executor_class, usage_context) - # Signal from main process to shutdown (multiprocessing.Value). - self.should_shutdown = should_shutdown - # Background Threads and Queues for IO. These enable us to # overlap ZMQ socket IO with GPU since they release the GIL, # and to overlap some serialization/deserialization with the # model forward pass. # Threads handle Socket <-> Queues and core_busy_loop uses Queue. - self.input_queue = queue.Queue() - self.output_queue = queue.Queue() + self.input_queue: queue.Queue[EngineCoreRequestUnion] = queue.Queue() + self.output_queue: queue.Queue[List[EngineCoreOutput]] = queue.Queue() threading.Thread(target=self.process_input_socket, args=(input_path, ), daemon=True).start() @@ -154,32 +173,9 @@ def __init__( daemon=True).start() # Send Readiness signal to EngineClient. 
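The mirrored-cache contract described in add_request above (a client-side hit implies a server-side hit) holds only because both ends use the same capacity and see the same access sequence. A standalone sketch of the protocol, using a plain OrderedDict-based LRU standing in for vLLM's LRUDictCache:

from collections import OrderedDict

class LRU:

    def __init__(self, size: int):
        self.data: OrderedDict = OrderedDict()
        self.size = size

    def get(self, key):
        if key in self.data:
            self.data.move_to_end(key)
            return self.data[key]
        return None

    def put(self, key, value):
        self.data[key] = value
        self.data.move_to_end(key)
        if len(self.data) > self.size:
            self.data.popitem(last=False)

client, server = LRU(size=2), LRU(size=2)

def send(mm_hash, mm_input):
    """Client (P0): on a hit, ship only the hash over IPC."""
    if client.get(mm_hash) is not None:
        return mm_hash, None
    client.put(mm_hash, mm_input)
    return mm_hash, mm_input

def receive(mm_hash, mm_input):
    """Server (P1): restore elided payloads from the mirror."""
    if mm_input is None:
        mm_input = server.get(mm_hash)
        assert mm_input is not None  # lockstep eviction guarantees this
    else:
        server.put(mm_hash, mm_input)
    return mm_input

assert receive(*send("img-a", b"pixels")) == b"pixels"  # miss: payload sent
assert receive(*send("img-a", b"pixels")) == b"pixels"  # hit: hash only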
- with self.make_socket(ready_path, zmq.constants.PUSH) as ready_socket: + with make_zmq_socket(ready_path, zmq.constants.PUSH) as ready_socket: ready_socket.send_string(EngineCoreProc.READY_STR) - @contextmanager - def make_socket(self, path: str, type: Any) -> Iterator[zmq.Socket]: - """Context manager for use """ - - ctx = zmq.Context() - try: - socket = ctx.socket(type) - - if type == zmq.constants.PULL: - socket.connect(path) - elif type == zmq.constants.PUSH: - socket.bind(path) - else: - raise ValueError(f"Unknown Socket Type: {type}") - - yield socket - - except KeyboardInterrupt: - logger.debug("EngineCore had Keyboard Interrupt.") - - finally: - ctx.destroy(linger=0) - @staticmethod def wait_for_startup( proc: BaseProcess, @@ -212,18 +208,13 @@ def wait_for_startup( @staticmethod def make_engine_core_process( vllm_config: VllmConfig, - executor_class: Type[GPUExecutor], + executor_class: Type[Executor], usage_context: UsageContext, input_path: str, output_path: str, ready_path: str, - should_shutdown: Synchronized, - ) -> BaseProcess: - # The current process might have CUDA context, - # so we need to spawn a new process. - # NOTE(rob): this is a problem for using EngineCoreProc w/ - # LLM, since we need a if __name__ == "__main__" guard. - context = multiprocessing.get_context("spawn") + ) -> EngineCoreProcHandle: + context = get_mp_context() process_kwargs = { "input_path": input_path, @@ -232,7 +223,6 @@ def make_engine_core_process( "vllm_config": vllm_config, "executor_class": executor_class, "usage_context": usage_context, - "should_shutdown": should_shutdown } # Run EngineCore busy loop in background process. proc = context.Process(target=EngineCoreProc.run_engine_core, @@ -241,28 +231,55 @@ def make_engine_core_process( # Wait for startup EngineCoreProc.wait_for_startup(proc, ready_path) - return proc + return EngineCoreProcHandle(proc=proc, + ready_path=ready_path, + input_path=input_path, + output_path=output_path) @staticmethod def run_engine_core(*args, **kwargs): """Launch EngineCore busy loop in background process.""" + # Signal handler used for graceful termination. + # SystemExit exception is only raised once to allow this and worker + # processes to terminate without error + shutdown_requested = False + + # Ensure we can serialize transformer config after spawning + maybe_register_config_serialize_by_value() + + def signal_handler(signum, frame): + nonlocal shutdown_requested + if not shutdown_requested: + shutdown_requested = True + raise SystemExit() + + # Either SIGTERM or SIGINT will terminate the engine_core + signal.signal(signal.SIGTERM, signal_handler) + signal.signal(signal.SIGINT, signal_handler) + + engine_core = None try: engine_core = EngineCoreProc(*args, **kwargs) engine_core.run_busy_loop() - except KeyboardInterrupt: + except SystemExit: logger.debug("EngineCore interrupted.") except BaseException as e: logger.exception(e) raise e + finally: + if engine_core is not None: + engine_core.shutdown() + engine_core = None + def run_busy_loop(self): """Core busy loop of the EngineCore.""" - # Loop until we get a shutdown signal. - while not self.should_shutdown: + # Loop until process is sent a SIGINT or SIGTERM + while True: # 1) Poll the input queue until there is work to do. 
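The signal handling in run_engine_core converts SIGTERM/SIGINT into a single SystemExit so that ordinary try/except/finally blocks perform the cleanup. A runnable miniature of that pattern (standalone, not vLLM code):

import signal
import time

shutdown_requested = False

def _handler(signum, frame):
    global shutdown_requested
    if not shutdown_requested:  # raise SystemExit at most once
        shutdown_requested = True
        raise SystemExit()

signal.signal(signal.SIGTERM, _handler)
signal.signal(signal.SIGINT, _handler)

try:
    while True:  # stand-in for the engine busy loop
        time.sleep(1)
except SystemExit:
    print("interrupted")  # reached on Ctrl-C or `kill`
finally:
    print("cleanup runs exactly once")  # shutdown() would go here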
if not self.scheduler.has_unfinished_requests(): while True: @@ -273,8 +290,8 @@ def run_busy_loop(self): except queue.Empty: self._log_stats() logger.debug("EngineCore busy loop waiting.") - if self.should_shutdown: - return + except BaseException: + raise # 2) Handle any new client requests (Abort or Add). while not self.input_queue.empty(): @@ -303,15 +320,13 @@ def _log_stats(self): self._last_logging_time = now - def _handle_client_request( - self, request: Union[EngineCoreRequest, EngineCoreProfile, - List[str]]) -> None: + def _handle_client_request(self, request: EngineCoreRequestUnion) -> None: """Handle EngineCoreRequest or EngineCoreABORT from Client.""" if isinstance(request, EngineCoreRequest): self.add_request(request) elif isinstance(request, EngineCoreProfile): - self.model_executor.worker.profile(request.is_start) + self.model_executor.profile(request.is_start) else: # TODO: make an EngineCoreAbort wrapper assert isinstance(request, list) @@ -324,7 +339,7 @@ def process_input_socket(self, input_path: str): decoder_add_req = PickleEncoder() decoder_abort_req = PickleEncoder() - with self.make_socket(input_path, zmq.constants.PULL) as socket: + with make_zmq_socket(input_path, zmq.constants.PULL) as socket: while True: # (RequestType, RequestData) type_frame, data_frame = socket.recv_multipart(copy=False) @@ -352,7 +367,7 @@ def process_output_socket(self, output_path: str): # Reuse send buffer. buffer = bytearray() - with self.make_socket(output_path, zmq.constants.PUSH) as socket: + with make_zmq_socket(output_path, zmq.constants.PUSH) as socket: while True: engine_core_outputs = self.output_queue.get() outputs = EngineCoreOutputs(outputs=engine_core_outputs) diff --git a/vllm/v1/engine/core_client.py b/vllm/v1/engine/core_client.py index 835963f7ee86c..d56fcbdb1e7c4 100644 --- a/vllm/v1/engine/core_client.py +++ b/vllm/v1/engine/core_client.py @@ -1,17 +1,18 @@ -import multiprocessing -import time -from typing import List, Union +import os +import weakref +from typing import List, Optional import msgspec import zmq import zmq.asyncio from vllm.logger import init_logger -from vllm.utils import get_open_zmq_ipc_path +from vllm.utils import get_open_zmq_ipc_path, kill_process_tree from vllm.v1.engine import (EngineCoreOutput, EngineCoreOutputs, EngineCoreProfile, EngineCoreRequest, - EngineCoreRequestType) -from vllm.v1.engine.core import EngineCore, EngineCoreProc + EngineCoreRequestType, EngineCoreRequestUnion) +from vllm.v1.engine.core import (EngineCore, EngineCoreProc, + EngineCoreProcHandle) from vllm.v1.serial_utils import PickleEncoder logger = init_logger(__name__) @@ -59,7 +60,7 @@ def get_output(self) -> List[EngineCoreOutput]: def add_request(self, request: EngineCoreRequest) -> None: raise NotImplementedError - async def profile(self, is_start=True) -> None: + def profile(self, is_start: bool = True) -> None: raise NotImplementedError def abort_requests(self, request_ids: List[str]) -> None: @@ -71,6 +72,9 @@ async def get_output_async(self) -> List[EngineCoreOutput]: async def add_request_async(self, request: EngineCoreRequest) -> None: raise NotImplementedError + async def profile_async(self, is_start: bool = True) -> None: + raise NotImplementedError + async def abort_requests_async(self, request_ids: List[str]) -> None: raise NotImplementedError @@ -99,7 +103,13 @@ def add_request(self, request: EngineCoreRequest) -> None: def abort_requests(self, request_ids: List[str]) -> None: self.engine_core.abort_requests(request_ids) - async def profile(self, 
is_start=True) -> None: + def shutdown(self): + self.engine_core.shutdown() + + def __del__(self): + self.shutdown() + + def profile(self, is_start: bool = True) -> None: self.engine_core.profile(is_start) @@ -127,7 +137,10 @@ def __init__( self.decoder = msgspec.msgpack.Decoder(EngineCoreOutputs) # ZMQ setup. - self.ctx = (zmq.asyncio.Context() if asyncio_mode else zmq.Context()) + if asyncio_mode: + self.ctx = zmq.asyncio.Context() + else: + self.ctx = zmq.Context() # type: ignore[attr-defined] # Path for IPC. ready_path = get_open_zmq_ipc_path() @@ -143,33 +156,40 @@ def __init__( self.input_socket.bind(input_path) # Start EngineCore in background process. - self.should_shutdown = multiprocessing.Value('b', False, lock=False) - self.proc = EngineCoreProc.make_engine_core_process( + self.proc_handle: Optional[EngineCoreProcHandle] + self.proc_handle = EngineCoreProc.make_engine_core_process( *args, - input_path=input_path, - output_path=output_path, - ready_path=ready_path, - should_shutdown=self.should_shutdown, + input_path= + input_path, # type: ignore[misc] # MyPy incorrectly flags duplicate keywords + output_path=output_path, # type: ignore[misc] + ready_path=ready_path, # type: ignore[misc] **kwargs, ) + self._finalizer = weakref.finalize(self, self.shutdown) def shutdown(self): - # Send shutdown signal to background process. - self.should_shutdown = True - # Shut down the zmq context. self.ctx.destroy(linger=0) - # Shutdown the process if needed. - if hasattr(self, "proc") and self.proc.is_alive(): - self.proc.terminate() + if hasattr(self, "proc_handle") and self.proc_handle: + # Shutdown the process if needed. + if self.proc_handle.proc.is_alive(): + self.proc_handle.proc.terminate() + self.proc_handle.proc.join(5) - time.sleep(5) - if self.proc.is_alive(): - self.proc.kill() + if self.proc_handle.proc.is_alive(): + kill_process_tree(self.proc_handle.proc.pid) - def __del__(self): - self.shutdown() + # Remove zmq ipc socket files + ipc_sockets = [ + self.proc_handle.ready_path, self.proc_handle.output_path, + self.proc_handle.input_path + ] + for ipc_socket in ipc_sockets: + socket_file = ipc_socket.replace("ipc://", "") + if os and os.path.exists(socket_file): + os.remove(socket_file) + self.proc_handle = None class SyncMPClient(MPClient): @@ -184,10 +204,8 @@ def get_output(self) -> List[EngineCoreOutput]: engine_core_outputs = self.decoder.decode(frame.buffer).outputs return engine_core_outputs - def _send_input( - self, request_type: EngineCoreRequestType, - request: Union[EngineCoreRequest, EngineCoreProfile, - List[str]]) -> None: + def _send_input(self, request_type: EngineCoreRequestType, + request: EngineCoreRequestUnion) -> None: # (RequestType, SerializedRequest) msg = (request_type.value, self.encoder.encode(request)) @@ -199,7 +217,7 @@ def add_request(self, request: EngineCoreRequest) -> None: def abort_requests(self, request_ids: List[str]) -> None: self._send_input(EngineCoreRequestType.ABORT, request_ids) - async def profile(self, is_start=True) -> None: + def profile(self, is_start: bool = True) -> None: self._send_input(EngineCoreRequestType.PROFILE, EngineCoreProfile(is_start)) @@ -217,10 +235,8 @@ async def get_output_async(self) -> List[EngineCoreOutput]: return engine_core_outputs - async def _send_input( - self, request_type: EngineCoreRequestType, - request: Union[EngineCoreRequest, EngineCoreProfile, - List[str]]) -> None: + async def _send_input(self, request_type: EngineCoreRequestType, + request: EngineCoreRequestUnion) -> None: msg = 
(request_type.value, self.encoder.encode(request)) await self.input_socket.send_multipart(msg, copy=False) @@ -232,6 +248,6 @@ async def abort_requests_async(self, request_ids: List[str]) -> None: if len(request_ids) > 0: await self._send_input(EngineCoreRequestType.ABORT, request_ids) - async def profile(self, is_start=True) -> None: + async def profile_async(self, is_start: bool = True) -> None: await self._send_input(EngineCoreRequestType.PROFILE, EngineCoreProfile(is_start)) diff --git a/vllm/v1/engine/detokenizer.py b/vllm/v1/engine/detokenizer.py index 6249d60199a62..02f34e2b54dd5 100644 --- a/vllm/v1/engine/detokenizer.py +++ b/vllm/v1/engine/detokenizer.py @@ -1,5 +1,5 @@ from dataclasses import dataclass -from typing import Dict, Iterable, List, Optional, Tuple +from typing import Dict, Iterable, List, Optional, Tuple, Union from vllm.engine.output_processor.stop_checker import StopChecker from vllm.logger import init_logger @@ -97,7 +97,7 @@ def add_tokens( self, new_token_ids: List[int], finish_reason: Optional[str], - stop_reason: Optional[str], + stop_reason: Optional[Union[int, str, None]], ) -> Optional[RequestOutput]: """ Update RequestState for the request_id by: diff --git a/vllm/v1/engine/llm_engine.py b/vllm/v1/engine/llm_engine.py index bd19d998a4adb..bea8c5502f612 100644 --- a/vllm/v1/engine/llm_engine.py +++ b/vllm/v1/engine/llm_engine.py @@ -1,5 +1,7 @@ from typing import Dict, List, Mapping, Optional, Type, Union +from typing_extensions import TypeVar + from vllm.config import VllmConfig from vllm.engine.arg_utils import EngineArgs from vllm.engine.metrics_types import StatLoggerBase @@ -12,15 +14,18 @@ from vllm.pooling_params import PoolingParams from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sampling_params import SamplingParams -from vllm.transformers_utils.tokenizer_group import init_tokenizer_from_configs +from vllm.transformers_utils.tokenizer_group import ( + BaseTokenizerGroup, init_tokenizer_from_configs) from vllm.usage.usage_lib import UsageContext from vllm.v1.engine.core_client import EngineCoreClient from vllm.v1.engine.detokenizer import Detokenizer from vllm.v1.engine.processor import Processor -from vllm.v1.executor.gpu_executor import GPUExecutor +from vllm.v1.executor.abstract import Executor logger = init_logger(__name__) +_G = TypeVar("_G", bound=BaseTokenizerGroup, default=BaseTokenizerGroup) + class LLMEngine: """Legacy LLMEngine for backwards compatibility.""" @@ -28,7 +33,7 @@ class LLMEngine: def __init__( self, vllm_config: VllmConfig, - executor_class: Type[GPUExecutor], + executor_class: Type[Executor], log_stats: bool, usage_context: UsageContext = UsageContext.ENGINE_CONTEXT, stat_loggers: Optional[Dict[str, StatLoggerBase]] = None, @@ -46,13 +51,16 @@ def __init__( model_config=vllm_config.model_config, scheduler_config=vllm_config.scheduler_config, parallel_config=vllm_config.parallel_config, - enable_lora=bool(vllm_config.lora_config)) + lora_config=vllm_config.lora_config) self.tokenizer.ping() # Processor (convert Inputs --> EngineCoreRequests) - self.processor = Processor(vllm_config.model_config, - vllm_config.lora_config, self.tokenizer, - input_registry, mm_registry) + self.processor = Processor(model_config=vllm_config.model_config, + cache_config=vllm_config.cache_config, + lora_config=vllm_config.lora_config, + tokenizer=self.tokenizer, + input_registry=input_registry, + mm_registry=mm_registry) # Detokenizer (converts EngineCoreOutputs --> RequestOutput) self.detokenizer = Detokenizer( @@ -98,11 
+106,19 @@ def from_engine_args( multiprocess_mode=enable_multiprocessing) @classmethod - def _get_executor_cls(cls, vllm_config: VllmConfig): - return GPUExecutor - - def stop_remote_worker_execution_loop(self) -> None: - raise NotImplementedError("TP not implemented yet.") + def _get_executor_cls(cls, vllm_config: VllmConfig) -> Type[Executor]: + executor_class: Type[Executor] + distributed_executor_backend = ( + vllm_config.parallel_config.distributed_executor_backend) + if distributed_executor_backend == "mp": + from vllm.v1.executor.multiproc_executor import MultiprocExecutor + executor_class = MultiprocExecutor + else: + assert (distributed_executor_backend is None) + from vllm.v1.executor.uniproc_executor import UniprocExecutor + executor_class = UniprocExecutor + + return executor_class def get_num_unfinished_requests(self) -> int: return self.detokenizer.get_num_unfinished_requests() @@ -169,5 +185,25 @@ def start_profile(self): def stop_profile(self): self.engine_core.profile(False) - def get_tokenizer_group(self, group_type): - pass + def get_tokenizer_group( + self, + group_type: Type[_G] = BaseTokenizerGroup, + ) -> _G: + tokenizer_group = self.tokenizer + + if tokenizer_group is None: + raise ValueError("Unable to get tokenizer because " + "skip_tokenizer_init is True") + if not isinstance(tokenizer_group, group_type): + raise TypeError("Invalid type of tokenizer group. " + f"Expected type: {group_type}, but " + f"found type: {type(tokenizer_group)}") + + return tokenizer_group + + def __del__(self): + self.shutdown() + + def shutdown(self): + if engine_core := getattr(self, "engine_core", None): + engine_core.shutdown() diff --git a/vllm/v1/engine/mm_input_mapper.py b/vllm/v1/engine/mm_input_mapper.py index 594c973678235..e53ba092ede04 100644 --- a/vllm/v1/engine/mm_input_mapper.py +++ b/vllm/v1/engine/mm_input_mapper.py @@ -1,39 +1,176 @@ from typing import Any, Dict, List, Optional +import PIL +from blake3 import blake3 + from vllm.config import ModelConfig +from vllm.inputs import PromptType +from vllm.logger import init_logger from vllm.multimodal import (MULTIMODAL_REGISTRY, MultiModalDataDict, MultiModalKwargs, MultiModalRegistry) +from vllm.v1.utils import LRUDictCache + +logger = init_logger(__name__) + +# The idea of MM preprocessor caching is based on having a client and a server, +# where the client executes in the frontend process (=P0) and the server in the +# core process (=P1). +# +# -- Client: Executes the MM mapper and performs caching of the results. +# -- Server: Performs caching of the results +# +# The caching for both client and server is mirrored/similar, and this allows us +# to avoid the serialization of "mm_inputs" (like pixel values) between +# client (=P0) and server (=P1) processes. 
+# Both Client and Server must use the same cache size +# (to perform mirrored caching) +# TODO: Tune the MM cache size +MM_CACHE_SIZE = 256 -class MMInputMapper: + +class MMInputMapperClient: def __init__( self, model_config: ModelConfig, mm_registry: MultiModalRegistry = MULTIMODAL_REGISTRY, ): + self.model_config = model_config self.mm_registry = mm_registry self.multi_modal_input_mapper = mm_registry.create_input_mapper( model_config) self.mm_registry.init_mm_limits_per_prompt(model_config) + # Init cache + self.use_cache = model_config.mm_cache_preprocessor + self.mm_cache = LRUDictCache[str, MultiModalKwargs](MM_CACHE_SIZE) + + # DEBUG: Set to None to disable + self.mm_debug_cache_hit_ratio_steps = None + self.mm_cache_hits = 0 + self.mm_cache_total = 0 + + def cache_hit_ratio(self, steps): + if self.mm_cache_total > 0 and self.mm_cache_total % steps == 0: + logger.debug("MMInputMapper: cache_hit_ratio = %.2f ", + self.mm_cache_hits / self.mm_cache_total) + + # TODO: Support modalities beyond image. def process_inputs( self, mm_data: MultiModalDataDict, + mm_hashes: Optional[List[str]], mm_processor_kwargs: Optional[Dict[str, Any]], + precomputed_mm_inputs: Optional[List[MultiModalKwargs]], ) -> List[MultiModalKwargs]: + if precomputed_mm_inputs is None: + image_inputs = mm_data["image"] + if not isinstance(image_inputs, list): + image_inputs = [image_inputs] + num_inputs = len(image_inputs) + else: + num_inputs = len(precomputed_mm_inputs) + + # Sanity + if self.use_cache: + assert mm_hashes is not None + assert num_inputs == len(mm_hashes) + + # Process each image input separately, so that later we can schedule + # them in a fine-grained manner. + # Apply caching (if enabled) and reuse precomputed inputs (if provided) + ret_inputs: List[MultiModalKwargs] = [] + for input_id in range(num_inputs): + if self.mm_debug_cache_hit_ratio_steps is not None: + self.cache_hit_ratio(self.mm_debug_cache_hit_ratio_steps) + + mm_input = None + if self.use_cache: + assert mm_hashes is not None + mm_hash = mm_hashes[input_id] + mm_input = self.mm_cache.get(mm_hash) + + self.mm_cache_total += 1 + if mm_input is None: + if precomputed_mm_inputs is not None: + # Reuse precomputed input (for merged preprocessor) + mm_input = precomputed_mm_inputs[input_id] + else: + # Apply MM mapper + mm_input = self.multi_modal_input_mapper( + {"image": [image_inputs[input_id]]}, + mm_processor_kwargs=mm_processor_kwargs, + ) + + if self.use_cache: + # Add to cache + assert mm_hash is not None + self.mm_cache.put(mm_hash, mm_input) + else: + self.mm_cache_hits += 1 + mm_input = None # Avoids sending mm_input to Server + + ret_inputs.append(mm_input) + + return ret_inputs + + +class MMInputMapperServer: + + def __init__(self, model_config): + self.use_cache = model_config.mm_cache_preprocessor + self.mm_cache = LRUDictCache[str, MultiModalKwargs](MM_CACHE_SIZE) + + def process_inputs( + self, + mm_inputs: List[Optional[MultiModalKwargs]], + mm_hashes: List[str], + ) -> List[MultiModalKwargs]: + assert len(mm_inputs) == len(mm_hashes) + + if not self.use_cache: + return mm_inputs + + full_mm_inputs = [] + for mm_input, mm_hash in zip(mm_inputs, mm_hashes): + assert mm_hash is not None + if mm_input is None: + mm_input = self.mm_cache.get(mm_hash) + assert mm_input is not None + else: + self.mm_cache.put(mm_hash, mm_input) + + full_mm_inputs.append(mm_input) + + return full_mm_inputs + + +class MMHasher: + + def __init__(self): + pass + + def hash(self, prompt: PromptType) -> Optional[List[str]]: + if "multi_modal_data" 
not in prompt:
+            return None
+
+        mm_data = prompt["multi_modal_data"]
         image_inputs = mm_data["image"]
         if not isinstance(image_inputs, list):
             image_inputs = [image_inputs]
+        assert len(image_inputs) > 0
 
-        # Process each image input separately so that later we can schedule
-        # them in a fine-grained manner.
-        mm_inputs: List[MultiModalKwargs] = []
-        num_images = len(image_inputs)
-        for i in range(num_images):
-            mm_input = self.multi_modal_input_mapper(
-                {"image": [image_inputs[i]]},
-                mm_processor_kwargs=mm_processor_kwargs,
-            )
-            mm_inputs.append(mm_input)
-        return mm_inputs
+        ret = []
+        for image in image_inputs:
+            assert isinstance(image, PIL.Image.Image)
+
+            # Convert image to bytes
+            bytes = image.tobytes()
+
+            # Hash image bytes
+            hasher = blake3()
+            hasher.update(bytes)
+            ret.append(hasher.hexdigest())
+
+        return ret
diff --git a/vllm/v1/engine/processor.py b/vllm/v1/engine/processor.py
index 5c1577190c75a..732757d6b0ac2 100644
--- a/vllm/v1/engine/processor.py
+++ b/vllm/v1/engine/processor.py
@@ -1,19 +1,21 @@
 import time
 from typing import Any, Dict, Mapping, Optional, Tuple, Union
 
-from vllm.config import LoRAConfig, ModelConfig
+from vllm.config import CacheConfig, LoRAConfig, ModelConfig
 from vllm.inputs import (INPUT_REGISTRY, InputRegistry, ProcessorInputs,
                          PromptType, SingletonInputsAdapter)
 from vllm.inputs.parse import is_encoder_decoder_inputs
 from vllm.inputs.preprocess import InputPreprocessor
 from vllm.lora.request import LoRARequest
-from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalRegistry
+from vllm.multimodal import (MULTIMODAL_REGISTRY, MultiModalKwargs,
+                             MultiModalRegistry)
 from vllm.pooling_params import PoolingParams
 from vllm.prompt_adapter.request import PromptAdapterRequest
 from vllm.sampling_params import SamplingParams
 from vllm.transformers_utils.config import try_get_generation_config
 from vllm.transformers_utils.tokenizer_group import BaseTokenizerGroup
 from vllm.v1.engine import DetokenizerRequest, EngineCoreRequest
+from vllm.v1.engine.mm_input_mapper import MMHasher, MMInputMapperClient
 
 
 class Processor:
@@ -21,6 +23,7 @@ class Processor:
     def __init__(
         self,
         model_config: ModelConfig,
+        cache_config: CacheConfig,
         lora_config: Optional[LoRAConfig],
         tokenizer: BaseTokenizerGroup,
         input_registry: InputRegistry = INPUT_REGISTRY,
@@ -39,6 +42,14 @@ def __init__(
         self.input_processor = input_registry.create_input_processor(
             model_config)
 
+        # Multi-modal (huggingface) input mapper
+        self.mm_input_mapper_client = MMInputMapperClient(model_config)
+
+        # Multi-modal hasher (for images)
+        self.use_hash = model_config.mm_cache_preprocessor or \
+            cache_config.enable_prefix_caching
+        self.mm_hasher = MMHasher()
+
         # TODO: run in a ThreadPoolExecutor or BackgroundProcess.
         # This ideally should release the GIL, so we should not block the
         # asyncio loop while this is running.
@@ -47,14 +58,14 @@ def process_inputs(
         self,
         request_id: str,
         prompt: PromptType,
         params: Union[SamplingParams, PoolingParams],
-        arrival_time: float,
+        arrival_time: Optional[float] = None,
         lora_request: Optional[LoRARequest] = None,
         trace_headers: Optional[Mapping[str, str]] = None,
         prompt_adapter_request: Optional[PromptAdapterRequest] = None,
         priority: int = 0,
     ) -> Tuple[DetokenizerRequest, EngineCoreRequest]:
 
-        # TODO(woosuk): Support embedding mode.
+        # TODO(woosuk): Support pooling models.
         # TODO(woosuk): Check max_logprobs
         # TODO(woosuk): Support encoder-decoder models.
@@ -66,6 +77,11 @@ def process_inputs(
         assert priority == 0, "vLLM V1 does not support priority at the moment."
assert trace_headers is None, "vLLM V1 does not support tracing yet." + # Compute MM hashes (if enabled) + mm_hashes = None + if self.use_hash: + mm_hashes = self.mm_hasher.hash(prompt) + # Process inputs. preprocessed_inputs = self.input_preprocessor.preprocess( prompt, @@ -96,6 +112,18 @@ def process_inputs( sampling_params.update_from_generation_config( self.generation_config_fields, eos_token_id) + # For merged preprocessor, mm_data is already mm_inputs + precomputed_mm_inputs = None + if isinstance(decoder_inputs.multi_modal_data, MultiModalKwargs): + precomputed_mm_inputs = [decoder_inputs.multi_modal_data] + + # Apply MM mapper + mm_inputs = None + if len(decoder_inputs.multi_modal_data) > 0: + mm_inputs = self.mm_input_mapper_client.process_inputs( + decoder_inputs.multi_modal_data, mm_hashes, + decoder_inputs.mm_processor_kwargs, precomputed_mm_inputs) + # Make Request for Detokenizer. detokenizer_request = DetokenizerRequest( request_id, @@ -113,9 +141,9 @@ def process_inputs( request_id, decoder_inputs.prompt, decoder_inputs.prompt_token_ids, - decoder_inputs.multi_modal_data, + mm_inputs, + mm_hashes, decoder_inputs.multi_modal_placeholders, - decoder_inputs.mm_processor_kwargs, sampling_params, eos_token_id, arrival_time, diff --git a/vllm/v1/executor/abstract.py b/vllm/v1/executor/abstract.py new file mode 100644 index 0000000000000..564d0447f15a6 --- /dev/null +++ b/vllm/v1/executor/abstract.py @@ -0,0 +1,40 @@ +from abc import ABC, abstractmethod +from typing import Tuple + +from vllm.config import VllmConfig +from vllm.v1.outputs import ModelRunnerOutput + + +class Executor(ABC): + """Abstract class for executors.""" + + @abstractmethod + def __init__(self, vllm_config: VllmConfig) -> None: + raise NotImplementedError + + @abstractmethod + def initialize(self, num_gpu_blocks: int) -> None: + raise NotImplementedError + + @abstractmethod + def determine_num_available_blocks(self) -> Tuple[int, int]: + raise NotImplementedError + + @abstractmethod + def execute_model( + self, + scheduler_output, + ) -> ModelRunnerOutput: + raise NotImplementedError + + @abstractmethod + def profile(self, is_start: bool = True): + raise NotImplementedError + + @abstractmethod + def shutdown(self): + pass + + @abstractmethod + def check_health(self) -> None: + raise NotImplementedError diff --git a/vllm/v1/executor/multiproc_executor.py b/vllm/v1/executor/multiproc_executor.py new file mode 100644 index 0000000000000..128101aa6956d --- /dev/null +++ b/vllm/v1/executor/multiproc_executor.py @@ -0,0 +1,387 @@ +import os +import pickle +import signal +import sys +import time +import weakref +from dataclasses import dataclass +from enum import Enum, auto +from multiprocessing.process import BaseProcess +from typing import Any, Dict, List, Optional, Tuple + +import zmq + +from vllm.config import VllmConfig +from vllm.distributed import (destroy_distributed_environment, + destroy_model_parallel) +from vllm.distributed.device_communicators.shm_broadcast import (Handle, + MessageQueue) +from vllm.executor.multiproc_worker_utils import ( + _add_prefix, get_mp_context, set_multiprocessing_worker_envs) +from vllm.logger import init_logger +from vllm.utils import (get_distributed_init_method, get_open_port, + get_open_zmq_ipc_path) +from vllm.v1.executor.abstract import Executor +from vllm.v1.outputs import ModelRunnerOutput +from vllm.v1.utils import make_zmq_socket +from vllm.worker.worker_base import WorkerWrapperBase + +logger = init_logger(__name__) + +POLLING_TIMEOUT_MS = 5000 +POLLING_TIMEOUT_S = 
POLLING_TIMEOUT_MS // 1000
+
+
+class MultiprocExecutor(Executor):
+
+    def __init__(self, vllm_config: VllmConfig) -> None:
+        # Call self.shutdown at exit to clean up
+        # and ensure workers will be terminated.
+        self._finalizer = weakref.finalize(self, self.shutdown)
+
+        self.vllm_config = vllm_config
+        self.parallel_config = vllm_config.parallel_config
+
+        self.world_size = self.parallel_config.world_size
+        tensor_parallel_size = self.parallel_config.tensor_parallel_size
+        assert self.world_size == tensor_parallel_size, (
+            f"world_size ({self.world_size}) must be equal to the "
+            f"tensor_parallel_size ({tensor_parallel_size}). "
+            f"Pipeline parallelism is not yet implemented in v1")
+
+        # Set multiprocessing envs that are common to V0 and V1
+        set_multiprocessing_worker_envs(self.parallel_config)
+
+        # Multiprocessing-based executor does not support multi-node setting.
+        # Since it only works for single node, we can use the loopback address
+        # 127.0.0.1 for communication.
+        distributed_init_method = get_distributed_init_method(
+            "127.0.0.1", get_open_port())
+
+        # Initialize worker and set up message queues for SchedulerOutputs
+        # and ModelRunnerOutputs
+        self.rpc_broadcast_mq = MessageQueue(self.world_size, self.world_size)
+        scheduler_output_handle = self.rpc_broadcast_mq.export_handle()
+
+        # Create workers
+        self.workers: List[WorkerProcHandle] = []
+        for rank in range(self.world_size):
+            worker = WorkerProc.make_worker_process(vllm_config, rank, rank,
+                                                    distributed_init_method,
+                                                    scheduler_output_handle)
+            self.workers.append(worker)
+
+        # Ensure message queues are ready. Will deadlock if re-ordered
+        # Must be kept consistent with the WorkerProc
+        self.rpc_broadcast_mq.wait_until_ready()
+        for w in self.workers:
+            w.worker_response_mq.wait_until_ready()
+
+    def initialize(self, num_gpu_blocks: int) -> None:
+        """
+        Initialize the KV caches and begin the model execution loop of the
+        underlying workers.
+        """
+        self.collective_rpc("initialize_cache", args=(num_gpu_blocks, ))
+        self.collective_rpc("compile_or_warm_up_model")
+
+    def determine_num_available_blocks(self) -> Tuple[int, int]:
+        """
+        Determine the number of available KV blocks by invoking the
+        underlying worker.
+        """
+        num_blocks = self.collective_rpc("determine_num_available_blocks")
+
+        # Since we use a shared centralized controller, we take the minimum
+        # number of blocks across all workers to make sure all the memory
+        # operators can be applied to all workers.
+        num_gpu_blocks = min(b[0] for b in num_blocks)
+        num_cpu_blocks = min(b[1] for b in num_blocks)
+
+        return num_gpu_blocks, num_cpu_blocks
+
+    def collective_rpc(self,
+                       method: str,
+                       timeout: Optional[float] = None,
+                       args: Tuple = (),
+                       kwargs: Optional[Dict] = None) -> List[Any]:
+        """
+        Execute an RPC call on workers.
+
+        Args:
+            method: Name of the worker method to execute
+            timeout: Maximum time in seconds to wait for execution. Raises a
+                TimeoutError on timeout. None means wait indefinitely.
+            args: Positional arguments to pass to the worker method
+            kwargs: Keyword arguments to pass to the worker method
+
+        Returns:
+            List of results from each worker
+        """
+        start_time = time.monotonic()
+        kwargs = kwargs or {}
+
+        try:
+            self.rpc_broadcast_mq.enqueue((method, args, kwargs))
+
+            responses = [None] * self.world_size
+            for w in self.workers:
+                dequeue_timeout = timeout - (time.monotonic() - start_time
+                                             ) if timeout is not None else None
+                status, result = w.worker_response_mq.dequeue(
+                    timeout=dequeue_timeout)
+
+                if status != WorkerProc.ResponseStatus.SUCCESS:
+                    if isinstance(result, Exception):
+                        raise result
+                    else:
+                        raise RuntimeError("Worker failed")
+
+                responses[w.rank] = result
+
+            return responses
+        except TimeoutError as e:
+            raise TimeoutError(f"RPC call to {method} timed out.") from e
+        except Exception as e:
+            # Re-raise any other exceptions
+            raise e
+
+    def execute_model(
+        self,
+        scheduler_output,
+    ) -> ModelRunnerOutput:
+        model_output = self.collective_rpc("execute_model",
+                                           args=(scheduler_output, ))[0]
+        return model_output
+
+    def profile(self, is_start: bool = True):
+        self.collective_rpc("profile", args=(is_start, ))
+        return
+
+    def _ensure_worker_termination(self):
+        """Ensure that all worker processes are terminated. Assumes workers have
+        received termination requests. Waits for processing, then sends
+        termination and kill signals if needed."""
+
+        def wait_for_termination(procs, timeout):
+            if not time:
+                # If we are in late stage shutdown, the interpreter may replace
+                # `time` with `None`.
+                return all(not proc.is_alive() for proc in procs)
+            start_time = time.time()
+            while time.time() - start_time < timeout:
+                if all(not proc.is_alive() for proc in procs):
+                    return True
+                time.sleep(0.1)
+            return False
+
+        # Send SIGTERM if still running
+        active_procs = [w.proc for w in self.workers if w.proc.is_alive()]
+        for p in active_procs:
+            p.terminate()
+        if not wait_for_termination(active_procs, 4):
+            # Send SIGKILL if still running
+            active_procs = [p for p in active_procs if p.is_alive()]
+            for p in active_procs:
+                p.kill()
+
+        self._cleanup_sockets()
+
+    def _cleanup_sockets(self):
+        for w in self.workers:
+            # Remove the zmq ipc socket file
+            socket_path = w.ready_path.replace("ipc://", "")
+            if os and os.path.exists(socket_path):
+                os.remove(socket_path)
+
+    def shutdown(self):
+        """Properly shut down the executor and its workers"""
+        if not getattr(self, 'shutting_down', False):
+            self.shutting_down = True
+            for w in self.workers:
+                w.worker_response_mq = None
+            self._ensure_worker_termination()
+
+        self.rpc_broadcast_mq = None
+
+    def check_health(self) -> None:
+        self.collective_rpc("check_health", timeout=10)
+        return
+
+
+@dataclass
+class WorkerProcHandle:
+    proc: BaseProcess
+    rank: int
+    ready_path: str
+    worker_response_mq: MessageQueue  # The worker process writes to this MQ
+
+
+class WorkerProc:
+    """Wrapper that runs one Worker in a separate process."""
+
+    READY_STR = "READY"
+
+    def __init__(
+        self,
+        vllm_config: VllmConfig,
+        local_rank: int,
+        rank: int,
+        distributed_init_method: str,
+        input_shm_handle: Handle,
+        ready_path: str,
+    ):
+        self.rank = rank
+        wrapper = WorkerWrapperBase(vllm_config=vllm_config)
+        wrapper.init_worker(vllm_config, local_rank, rank,
+                            distributed_init_method)
+        self.worker = wrapper.worker
+
+        pid = os.getpid()
+        _add_prefix(sys.stdout, f"VllmWorker rank={rank}", pid)
+        _add_prefix(sys.stderr, f"VllmWorker rank={rank}", pid)
+
+        # Initialize MessageQueue for receiving SchedulerOutput
+        self.rpc_broadcast_mq = MessageQueue.create_from_handle(
+            input_shm_handle, self.worker.rank)
+
+        # Initializes a message queue for sending the model output
+        self.worker_response_mq = MessageQueue(1, 1)
+        worker_response_mq_handle = self.worker_response_mq.export_handle()
+
+        # Send Readiness signal to EngineCore process.
+        with make_zmq_socket(ready_path, zmq.constants.PUSH) as ready_socket:
+            payload = pickle.dumps(worker_response_mq_handle,
+                                   protocol=pickle.HIGHEST_PROTOCOL)
+            ready_socket.send_string(WorkerProc.READY_STR)
+            ready_socket.send(payload)
+
+        self.worker.initialize()
+        self.worker.load_model()
+
+    @staticmethod
+    def make_worker_process(
+            vllm_config: VllmConfig,
+            local_rank: int,
+            rank: int,
+            distributed_init_method: str,
+            input_shm_handle,  # Receive SchedulerOutput
+    ) -> WorkerProcHandle:
+        context = get_mp_context()
+
+        # ZMQ path for worker to send ready message and shm_broadcast handle
+        # back to core process.
+        ready_path = get_open_zmq_ipc_path()
+
+        process_kwargs = {
+            "vllm_config": vllm_config,
+            "local_rank": local_rank,
+            "rank": rank,
+            "distributed_init_method": distributed_init_method,
+            "input_shm_handle": input_shm_handle,
+            "ready_path": ready_path,
+        }
+        # Run the WorkerProc busy loop in a background process.
+        proc = context.Process(target=WorkerProc.worker_main,
+                               kwargs=process_kwargs,
+                               daemon=True)
+        proc.start()
+
+        # Wait for startup
+        worker_response_mq_handle = WorkerProc.wait_for_startup(
+            proc, ready_path)
+
+        worker_response_mq = MessageQueue.create_from_handle(
+            worker_response_mq_handle, 0)
+
+        return WorkerProcHandle(proc, rank, ready_path, worker_response_mq)
+
+    def shutdown(self):
+        self.rpc_broadcast_mq = None
+        self.worker_response_mq = None
+        destroy_model_parallel()
+        destroy_distributed_environment()
+
+    @staticmethod
+    def worker_main(*args, **kwargs):
+        """ Worker initialization and execution loops.
+        This runs in a background process """
+
+        # Signal handler used for graceful termination.
+        # SystemExit exception is only raised once to allow this and worker
+        # processes to terminate without error
+        shutdown_requested = False
+
+        def signal_handler(signum, frame):
+            nonlocal shutdown_requested
+            if not shutdown_requested:
+                shutdown_requested = True
+                raise SystemExit()
+
+        # Either SIGTERM or SIGINT will terminate the worker
+        signal.signal(signal.SIGTERM, signal_handler)
+        signal.signal(signal.SIGINT, signal_handler)
+
+        worker = None
+        try:
+            worker = WorkerProc(*args, **kwargs)
+
+            # Ensure message queues are ready. Will deadlock if re-ordered.
+            # Must be kept consistent with the Executor
+            worker.rpc_broadcast_mq.wait_until_ready()
+            worker.worker_response_mq.wait_until_ready()
+
+            worker.worker_busy_loop()
+
+        except SystemExit:
+            logger.debug("Worker interrupted.")
+
+        except BaseException as e:
+            logger.exception(e)
+            raise
+
+        finally:
+            # Clean up once worker exits busy loop
+            if worker is not None:
+                worker.shutdown()
+                worker = None
+
+    @staticmethod
+    def wait_for_startup(
+        proc: BaseProcess,
+        ready_path: str,
+    ) -> Optional[Handle]:
+        """Wait until the Worker is ready."""
+        with make_zmq_socket(ready_path, zmq.constants.PULL) as socket:
+
+            # Wait for Worker to send READY.
+ while socket.poll(timeout=POLLING_TIMEOUT_MS) == 0: + logger.debug("Waiting for WorkerProc to startup.") + + if not proc.is_alive(): + raise RuntimeError("WorkerProc failed to start.") + + message = socket.recv_string() + assert message == WorkerProc.READY_STR + handle_frame = socket.recv(copy=False) + handle = pickle.loads(handle_frame.buffer) + return handle + + class ResponseStatus(Enum): + SUCCESS = auto() + FAILURE = auto() + + def worker_busy_loop(self): + """Main busy loop for Multiprocessing Workers""" + while True: + method, args, kwargs = self.rpc_broadcast_mq.dequeue() + + try: + output = getattr(self.worker, method)(*args, **kwargs) + except BaseException as e: + self.worker_response_mq.enqueue( + (WorkerProc.ResponseStatus.FAILURE, e)) + continue + + self.worker_response_mq.enqueue( + (WorkerProc.ResponseStatus.SUCCESS, output)) diff --git a/vllm/v1/executor/gpu_executor.py b/vllm/v1/executor/uniproc_executor.py similarity index 87% rename from vllm/v1/executor/gpu_executor.py rename to vllm/v1/executor/uniproc_executor.py index f71fa16b16e27..be058318de58b 100644 --- a/vllm/v1/executor/gpu_executor.py +++ b/vllm/v1/executor/uniproc_executor.py @@ -4,13 +4,14 @@ from vllm.config import VllmConfig from vllm.logger import init_logger from vllm.utils import get_distributed_init_method, get_ip, get_open_port +from vllm.v1.executor.abstract import Executor from vllm.v1.outputs import ModelRunnerOutput from vllm.v1.worker.gpu_worker import Worker logger = init_logger(__name__) -class GPUExecutor: +class UniprocExecutor(Executor): def __init__(self, vllm_config: VllmConfig) -> None: self.vllm_config = vllm_config @@ -25,7 +26,7 @@ def __init__(self, vllm_config: VllmConfig) -> None: self.prompt_adapter_config = vllm_config.prompt_adapter_config self.observability_config = vllm_config.observability_config - self.worker = self._create_worker() + self.worker: Worker = self._create_worker() self.worker.initialize() self.worker.load_model() @@ -54,7 +55,7 @@ def determine_num_available_blocks(self) -> Tuple[int, int]: """ return self.worker.determine_num_available_blocks() - def initialize_cache(self, num_gpu_blocks: int) -> None: + def initialize(self, num_gpu_blocks: int) -> None: """Initialize the KV cache by invoking the underlying worker. """ # NOTE: This is logged in the executor because there can be >1 worker @@ -71,7 +72,13 @@ def execute_model( output = self.worker.execute_model(scheduler_output) return output + def profile(self, is_start: bool = True): + self.worker.profile(is_start) + + def shutdown(self): + pass + def check_health(self) -> None: - # GPUExecutor will always be healthy as long as + # UniprocExecutor will always be healthy as long as # it's running. return diff --git a/vllm/v1/outputs.py b/vllm/v1/outputs.py index 8574987728844..acc3a944e21b9 100644 --- a/vllm/v1/outputs.py +++ b/vllm/v1/outputs.py @@ -8,7 +8,7 @@ class SamplerOutput: # [num_reqs] - sampled_token_ids: torch.Tensor + sampled_token_ids: List[int] # [num_reqs, max_num_logprobs + 1] logprob_token_ids: Optional[torch.Tensor] @@ -20,6 +20,8 @@ class SamplerOutput: prompt_logprobs: Optional[torch.Tensor] +# ModelRunnerOutput is serialized and sent to the scheduler process. +# This is expensive for torch.Tensor so prefer to use List instead. 
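That cost comment is the rationale for the List[int] change just below: msgspec's msgpack encoder handles ints and lists natively, whereas a torch.Tensor would need a custom encode hook plus a device-to-host copy at an uncontrolled point. A rough sketch of the cheap path, assuming only that msgspec is installed:

from typing import List

import msgspec

encoder = msgspec.msgpack.Encoder()
decoder = msgspec.msgpack.Decoder(List[int])

# Token ids round-trip through msgpack with no custom hooks.
payload = encoder.encode([17, 93, 4242])
assert decoder.decode(payload) == [17, 93, 4242]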
@dataclass class ModelRunnerOutput: @@ -29,7 +31,7 @@ class ModelRunnerOutput: req_id_to_index: Dict[str, int] # [num_reqs] - sampled_token_ids_cpu: torch.Tensor + sampled_token_ids: List[int] # [num_reqs, max_num_logprobs + 1] logprob_token_ids_cpu: Optional[torch.Tensor] diff --git a/vllm/v1/request.py b/vllm/v1/request.py index 51fb4003e5fe0..f4783ae366ef0 100644 --- a/vllm/v1/request.py +++ b/vllm/v1/request.py @@ -1,5 +1,5 @@ import enum -from typing import List, Optional, Union +from typing import TYPE_CHECKING, List, Optional, Union from vllm.inputs import DecoderOnlyInputs, SingletonInputsAdapter, token_inputs from vllm.lora.request import LoRARequest @@ -9,6 +9,9 @@ from vllm.v1.engine import EngineCoreRequest from vllm.v1.utils import ConstantList +if TYPE_CHECKING: + from vllm.v1.core.kv_cache_utils import BlockHashType + class Request: @@ -45,9 +48,7 @@ def __init__( self._all_token_ids: List[int] = self.prompt_token_ids.copy() self.num_computed_tokens = 0 - # Raw multimodal data before the mm input mapper (e.g., PIL images). - self.mm_data = self.inputs.multi_modal_data - self.mm_processor_kwargs = self.inputs.mm_processor_kwargs + # Multi-modal input metadata. mm_positions = self.inputs.multi_modal_placeholders if mm_positions: # FIXME(woosuk): Support other modalities. @@ -56,6 +57,14 @@ def __init__( self.mm_positions = [] # Output of the mm input mapper (e.g., image tensors). self.mm_inputs: List[MultiModalKwargs] = [] + if self.inputs.multi_modal_inputs: + self.mm_inputs = self.inputs.multi_modal_inputs + + self.mm_hashes: List[str] = self.inputs.multi_modal_hashes + + # Cache the computed kv block hashes of the request to avoid + # recomputing. + self._kv_block_hashes: List[BlockHashType] = [] @classmethod def from_engine_core_request(cls, request: EngineCoreRequest) -> "Request": @@ -64,9 +73,11 @@ def from_engine_core_request(cls, request: EngineCoreRequest) -> "Request": inputs=token_inputs( prompt_token_ids=request.prompt_token_ids, prompt=request.prompt, - multi_modal_data=request.mm_data, + multi_modal_data=None, + multi_modal_inputs=request.mm_inputs, + multi_modal_hashes=request.mm_hashes, multi_modal_placeholders=request.mm_placeholders, - mm_processor_kwargs=request.mm_processor_kwargs, + mm_processor_kwargs=None, ), sampling_params=request.sampling_params, eos_token_id=request.eos_token_id, @@ -110,7 +121,7 @@ def get_finished_reason(self) -> Union[str, None]: return RequestStatus.get_finished_reason(self.status) def has_encoder_inputs(self) -> bool: - return len(self.mm_data) > 0 + return len(self.mm_inputs) > 0 @property def num_encoder_inputs(self) -> int: @@ -121,6 +132,17 @@ def get_num_encoder_tokens(self, input_id: int) -> int: num_tokens = self.mm_positions[input_id]["length"] return num_tokens + @property + def kv_block_hashes(self) -> ConstantList["BlockHashType"]: + # Prevent directly appending to the kv_block_hashes. + return ConstantList(self._kv_block_hashes) + + def set_kv_block_hashes(self, value: List["BlockHashType"]) -> None: + self._kv_block_hashes = value + + def append_kv_block_hashes(self, block_hash: "BlockHashType") -> None: + self._kv_block_hashes.append(block_hash) + class RequestStatus(enum.IntEnum): """Status of a request.""" diff --git a/vllm/v1/sample/sampler.py b/vllm/v1/sample/sampler.py index 927f274541c4d..d1a755be01ff7 100644 --- a/vllm/v1/sample/sampler.py +++ b/vllm/v1/sample/sampler.py @@ -37,8 +37,9 @@ def forward( topk_logprobs = None topk_indices = None + # NOTE: CPU-GPU synchronization happens here. 
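+        # `.tolist()` blocks until the sampled ids are copied back to the
+        # host, so the returned SamplerOutput holds plain Python ints.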
sampler_output = SamplerOutput( - sampled_token_ids=sampled, + sampled_token_ids=sampled.tolist(), logprob_token_ids=topk_indices, logprobs=topk_logprobs, prompt_logprob_token_ids=None, diff --git a/vllm/v1/utils.py b/vllm/v1/utils.py index 4b26749712e32..5f327d7066830 100644 --- a/vllm/v1/utils.py +++ b/vllm/v1/utils.py @@ -1,9 +1,19 @@ -from typing import Generic, List, TypeVar, overload +from collections import OrderedDict +from collections.abc import Sequence +from contextlib import contextmanager +from typing import (Any, Generic, Iterator, List, Optional, TypeVar, Union, + overload) + +import zmq + +from vllm.logger import init_logger + +logger = init_logger(__name__) T = TypeVar("T") -class ConstantList(Generic[T]): +class ConstantList(Generic[T], Sequence): def __init__(self, x: List[T]) -> None: self._x = x @@ -26,29 +36,33 @@ def remove(self, item): def clear(self): raise Exception("Cannot clear a constant list") - def index(self, item): - return self._x.index(item) + def index(self, + item: T, + start: int = 0, + stop: Optional[int] = None) -> int: + return self._x.index(item, start, + stop if stop is not None else len(self._x)) @overload - def __getitem__(self, item) -> T: + def __getitem__(self, item: int) -> T: ... @overload def __getitem__(self, s: slice, /) -> List[T]: ... - def __getitem__(self, item): + def __getitem__(self, item: Union[int, slice]) -> Union[T, List[T]]: return self._x[item] @overload - def __setitem__(self, item, value): + def __setitem__(self, item: int, value: T): ... @overload - def __setitem__(self, s: slice, value, /): + def __setitem__(self, s: slice, value: T, /): ... - def __setitem__(self, item, value): + def __setitem__(self, item: Union[int, slice], value: Union[T, List[T]]): raise Exception("Cannot set item in a constant list") def __delitem__(self, item): @@ -62,3 +76,53 @@ def __contains__(self, item): def __len__(self): return len(self._x) + + +@contextmanager +def make_zmq_socket( + path: str, + type: Any) -> Iterator[zmq.Socket]: # type: ignore[name-defined] + """Context manager for a ZMQ socket""" + + ctx = zmq.Context() # type: ignore[attr-defined] + try: + socket = ctx.socket(type) + + if type == zmq.constants.PULL: + socket.connect(path) + elif type == zmq.constants.PUSH: + socket.bind(path) + else: + raise ValueError(f"Unknown Socket Type: {type}") + + yield socket + + except KeyboardInterrupt: + logger.debug("Worker had Keyboard Interrupt.") + + finally: + ctx.destroy(linger=0) + + +K = TypeVar('K') +V = TypeVar('V') + + +class LRUDictCache(Generic[K, V]): + + def __init__(self, size: int): + self.cache: OrderedDict[K, V] = OrderedDict() + self.size = size + + def get(self, key: K, default=None) -> V: + if key not in self.cache: + return default + + self.cache.move_to_end(key) + return self.cache[key] + + def put(self, key: K, value: V): + self.cache[key] = value + self.cache.move_to_end(key) + if len(self.cache) > self.size: + self.cache.popitem(last=False) diff --git a/vllm/v1/worker/gpu_input_batch.py b/vllm/v1/worker/gpu_input_batch.py new file mode 100644 index 0000000000000..5c113c74778df --- /dev/null +++ b/vllm/v1/worker/gpu_input_batch.py @@ -0,0 +1,295 @@ +# Datastructures defining an input batch + +from dataclasses import dataclass +from typing import TYPE_CHECKING, Dict, List, Optional, Set + +import numpy as np +import torch + +from vllm.multimodal import MultiModalKwargs +from vllm.sampling_params import SamplingParams, SamplingType +from vllm.v1.sample.metadata import SamplingMetadata + +if TYPE_CHECKING: + from 
vllm.multimodal.inputs import PlaceholderRange + + +@dataclass +class CachedRequestState: + + req_id: str + prompt_token_ids: List[int] + prompt: Optional[str] + mm_inputs: List[MultiModalKwargs] + mm_positions: List["PlaceholderRange"] + sampling_params: SamplingParams + generator: Optional[torch.Generator] + + block_ids: List[int] + num_computed_tokens: int + output_token_ids: List[int] + + @property + def num_tokens(self) -> int: + return len(self.prompt_token_ids) + len(self.output_token_ids) + + +class InputBatch: + + def __init__( + self, + max_num_reqs: int, + max_model_len: int, + max_num_blocks_per_req: int, + device: torch.device, + pin_memory: bool, + ): + self.max_num_reqs = max_num_reqs + self.max_model_len = max_model_len + self.max_num_blocks_per_req = max_num_blocks_per_req + self.device = device + self.pin_memory = pin_memory + + self.req_ids: List[Optional[str]] = [None] * max_num_reqs + self.req_id_to_index: Dict[str, int] = {} + + # TODO(woosuk): This buffer could be too large if max_model_len is big. + # Find a way to reduce the CPU memory usage. + self.token_ids_cpu_tensor = torch.zeros( + (max_num_reqs, max_model_len), + device="cpu", + dtype=torch.int32, + pin_memory=pin_memory, + ) + self.token_ids_cpu = self.token_ids_cpu_tensor.numpy() + self.num_computed_tokens_cpu = np.empty(max_num_reqs, dtype=np.int32) + + # Attention-related. + self.block_table = torch.zeros( + (max_num_reqs, max_num_blocks_per_req), + device=self.device, + dtype=torch.int32, + ) + self.block_table_cpu_tensor = torch.zeros( + (max_num_reqs, max_num_blocks_per_req), + device="cpu", + dtype=torch.int32, + pin_memory=pin_memory, + ) + self.block_table_cpu = self.block_table_cpu_tensor.numpy() + + # Sampling-related. + self.temperature = torch.empty((max_num_reqs, ), + dtype=torch.float32, + device=device) + self.temperature_cpu_tensor = torch.empty((max_num_reqs, ), + dtype=torch.float32, + device="cpu", + pin_memory=pin_memory) + self.temperature_cpu = self.temperature_cpu_tensor.numpy() + self.greedy_reqs: Set[str] = set() + self.random_reqs: Set[str] = set() + + self.top_p = torch.empty((max_num_reqs, ), + dtype=torch.float32, + device=device) + self.top_p_cpu_tensor = torch.empty((max_num_reqs, ), + dtype=torch.float32, + device="cpu", + pin_memory=pin_memory) + self.top_p_cpu = self.top_p_cpu_tensor.numpy() + self.top_p_reqs: Set[str] = set() + + self.top_k = torch.empty((max_num_reqs, ), + dtype=torch.int32, + device=device) + self.top_k_cpu_tensor = torch.empty((max_num_reqs, ), + dtype=torch.int32, + device="cpu", + pin_memory=pin_memory) + self.top_k_cpu = self.top_k_cpu_tensor.numpy() + self.top_k_reqs: Set[str] = set() + + # req_index -> generator + # NOTE(woosuk): The indices of the requests that do not have their own + # generator should not be included in the dictionary. + self.generators: Dict[int, torch.Generator] = {} + + self.num_logprobs: Dict[str, int] = {} + self.prompt_logprob_reqs: Set[str] = set() + + def add_request( + self, + request: "CachedRequestState", + req_index: Optional[int] = None, + ) -> None: + if req_index is None: + req_index = self.num_reqs + assert req_index < self.max_num_reqs + + req_id = request.req_id + self.req_ids[req_index] = req_id + self.req_id_to_index[req_id] = req_index + + # Copy the prompt token ids and output token ids. 
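+        # E.g., prompt [5, 7, 9] with outputs [11, 13] fills the row as
+        # [5, 7, 9, 11, 13, ...], with start_idx=3 and end_idx=5 below.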
+ num_prompt_tokens = len(request.prompt_token_ids) + self.token_ids_cpu[ + req_index, :num_prompt_tokens] = request.prompt_token_ids + start_idx = num_prompt_tokens + end_idx = start_idx + len(request.output_token_ids) + self.token_ids_cpu[req_index, + start_idx:end_idx] = request.output_token_ids + + self.num_computed_tokens_cpu[req_index] = request.num_computed_tokens + num_blocks = len(request.block_ids) + self.block_table_cpu[req_index, :num_blocks] = request.block_ids + + sampling_params = request.sampling_params + self.temperature_cpu[req_index] = sampling_params.temperature + if sampling_params.sampling_type == SamplingType.GREEDY: + self.greedy_reqs.add(req_id) + else: + self.random_reqs.add(req_id) + + self.top_p_cpu[req_index] = sampling_params.top_p + if sampling_params.top_p < 1: + self.top_p_reqs.add(req_id) + self.top_k_cpu[req_index] = sampling_params.top_k + if sampling_params.top_k > 0: + self.top_k_reqs.add(req_id) + + # NOTE(woosuk): self.generators should not include the requests that + # do not have their own generator. + if request.generator is not None: + self.generators[req_index] = request.generator + + num_logprobs = sampling_params.logprobs + if num_logprobs is not None and num_logprobs > 0: + self.num_logprobs[req_id] = num_logprobs + if sampling_params.prompt_logprobs: + self.prompt_logprob_reqs.add(req_id) + + def remove_request(self, req_id: str) -> Optional[int]: + req_index = self.req_id_to_index.pop(req_id, None) + if req_index is None: + return None + self.req_ids[req_index] = None + + self.greedy_reqs.discard(req_id) + self.random_reqs.discard(req_id) + self.top_p_reqs.discard(req_id) + self.top_k_reqs.discard(req_id) + self.generators.pop(req_index, None) + self.num_logprobs.pop(req_id, None) + self.prompt_logprob_reqs.discard(req_id) + return req_index + + def clear(self) -> None: + self.req_ids = [None] * self.max_num_reqs + self.req_id_to_index.clear() + self.greedy_reqs.clear() + self.random_reqs.clear() + self.top_p_reqs.clear() + self.top_k_reqs.clear() + self.generators.clear() + self.num_logprobs.clear() + self.prompt_logprob_reqs.clear() + + def condense(self, empty_req_indices: List[int]) -> None: + if self.num_reqs == 0: + # The batched states are empty. + return + + # NOTE(woosuk): This function assumes that the empty_req_indices + # is sorted in descending order. + last_req_index = self.num_reqs + len(empty_req_indices) - 1 + while empty_req_indices: + # Find the largest non-empty index. + while last_req_index in empty_req_indices: + last_req_index -= 1 + + # Find the smallest empty index. + empty_index = empty_req_indices.pop() + if empty_index >= last_req_index: + break + + # Swap the states. + req_id = self.req_ids[last_req_index] + assert req_id is not None + self.req_ids[empty_index] = req_id + self.req_ids[last_req_index] = None + self.req_id_to_index[req_id] = empty_index + + # TODO(woosuk): Optimize the copy of token_ids_cpu and + # block_table_cpu. 
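+            # Each per-request row is moved from the last occupied slot
+            # into the empty slot, keeping the batch densely packed.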
+ self.token_ids_cpu[empty_index] = self.token_ids_cpu[ + last_req_index] + self.num_computed_tokens_cpu[ + empty_index] = self.num_computed_tokens_cpu[last_req_index] + self.block_table_cpu[empty_index] = self.block_table_cpu[ + last_req_index] + self.temperature_cpu[empty_index] = self.temperature_cpu[ + last_req_index] + self.top_p_cpu[empty_index] = self.top_p_cpu[last_req_index] + self.top_k_cpu[empty_index] = self.top_k_cpu[last_req_index] + generator = self.generators.pop(last_req_index, None) + if generator is not None: + self.generators[empty_index] = generator + + # Decrement last_req_index since it is now empty. + last_req_index -= 1 + + def make_sampling_metadata( + self, + skip_copy: bool = False, + ) -> SamplingMetadata: + if not skip_copy: + self.temperature[:self.num_reqs].copy_( + self.temperature_cpu_tensor[:self.num_reqs], non_blocking=True) + self.top_p[:self.num_reqs].copy_( + self.top_p_cpu_tensor[:self.num_reqs], non_blocking=True) + self.top_k[:self.num_reqs].copy_( + self.top_k_cpu_tensor[:self.num_reqs], non_blocking=True) + return SamplingMetadata( + temperature=self.temperature[:self.num_reqs], + all_greedy=self.all_greedy, + all_random=self.all_random, + top_p=self.top_p[:self.num_reqs], + top_k=self.top_k[:self.num_reqs], + no_top_p=self.no_top_p, + no_top_k=self.no_top_k, + generators=self.generators, + max_num_logprobs=self.max_num_logprobs, + ) + + @property + def num_reqs(self) -> int: + return len(self.req_id_to_index) + + @property + def all_greedy(self) -> bool: + return len(self.random_reqs) == 0 + + @property + def all_random(self) -> bool: + return len(self.greedy_reqs) == 0 + + @property + def no_top_p(self) -> bool: + return len(self.top_p_reqs) == 0 + + @property + def no_top_k(self) -> bool: + return len(self.top_k_reqs) == 0 + + @property + def max_num_logprobs(self) -> int: + return max(self.num_logprobs.values()) if self.num_logprobs else 0 + + @property + def no_logprob(self) -> bool: + return len(self.num_logprobs) == 0 + + @property + def no_prompt_logprob(self) -> bool: + return len(self.prompt_logprob_reqs) == 0 diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 1fa47f553dfd6..c6fab5f05fcb3 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -1,31 +1,30 @@ import gc import time -from dataclasses import dataclass -from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple +from typing import TYPE_CHECKING, Dict, List, Tuple, cast import numpy as np import torch import torch.distributed import torch.nn as nn -from vllm.compilation.compile_context import set_compile_context from vllm.config import CompilationLevel, VllmConfig from vllm.distributed.parallel_state import graph_capture from vllm.forward_context import set_forward_context -from vllm.inputs import INPUT_REGISTRY, InputRegistry +from vllm.inputs import INPUT_REGISTRY from vllm.logger import init_logger from vllm.model_executor.model_loader import get_model -from vllm.multimodal import MultiModalKwargs -from vllm.sampling_params import SamplingParams, SamplingType -from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, DeviceMemoryProfiler, cdiv, - is_pin_memory_available) +from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalKwargs +from vllm.sampling_params import SamplingType +from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, DeviceMemoryProfiler, + LayerBlockType, cdiv, is_pin_memory_available) from vllm.v1.attention.backends.flash_attn import (FlashAttentionBackend, FlashAttentionMetadata) 
+from vllm.v1.engine.mm_input_mapper import MMInputMapperClient from vllm.v1.outputs import ModelRunnerOutput from vllm.v1.sample.metadata import SamplingMetadata +from vllm.v1.worker.gpu_input_batch import CachedRequestState, InputBatch if TYPE_CHECKING: - from vllm.multimodal.inputs import PlaceholderRange from vllm.v1.core.scheduler import SchedulerOutput logger = init_logger(__name__) @@ -36,7 +35,7 @@ class GPUModelRunner: def __init__( self, vllm_config: VllmConfig, - input_registry: InputRegistry = INPUT_REGISTRY, + device: torch.device, ): self.vllm_config = vllm_config self.model_config = vllm_config.model_config @@ -45,7 +44,6 @@ def __init__( self.load_config = vllm_config.load_config self.parallel_config = vllm_config.parallel_config self.scheduler_config = vllm_config.scheduler_config - self.device_config = vllm_config.device_config self.speculative_config = vllm_config.speculative_config self.prompt_adapter_config = vllm_config.prompt_adapter_config self.observability_config = vllm_config.observability_config @@ -54,7 +52,7 @@ def __init__( cache_config = self.cache_config scheduler_config = self.scheduler_config parallel_config = self.parallel_config - self.device = self.device_config.device + self.device = device self.pin_memory = is_pin_memory_available() self.dtype = self.model_config.dtype if cache_config.cache_dtype == "auto": @@ -63,21 +61,28 @@ def __init__( self.kv_cache_dtype = STR_DTYPE_TO_TORCH_DTYPE[ cache_config.cache_dtype] + self.is_multimodal_model = model_config.is_multimodal_model self.sliding_window = model_config.get_sliding_window() self.block_size = cache_config.block_size self.max_model_len = model_config.max_model_len self.max_num_blocks_per_req = cdiv(self.max_model_len, self.block_size) self.max_num_tokens = scheduler_config.max_num_batched_tokens + self.max_num_reqs = scheduler_config.max_num_seqs # Model-related. - self.num_attn_layers = model_config.get_num_attention_layers( - parallel_config) + self.num_attn_layers = model_config.get_num_layers_by_block_type( + parallel_config, LayerBlockType.attention) self.num_kv_heads = model_config.get_num_kv_heads(parallel_config) self.head_size = model_config.get_head_size() self.hidden_size = model_config.get_hidden_size() # Multi-modal data support - self.input_registry = input_registry + self.input_registry = INPUT_REGISTRY + self.mm_registry = MULTIMODAL_REGISTRY + # NOTE: mm_input_mapper is only used for memory profiling. + self.mm_input_mapper = MMInputMapperClient(self.model_config) + self.max_num_encoder_input_tokens = self.scheduler_config.max_num_encoder_input_tokens # noqa: E501 + self.encoder_cache_size = self.scheduler_config.encoder_cache_size # Lazy initialization # self.model: nn.Module # Set after load_model @@ -89,7 +94,7 @@ def __init__( self.requests: Dict[str, CachedRequestState] = {} # Persistent batch. self.input_batch = InputBatch( - max_num_reqs=self.scheduler_config.max_num_seqs, + max_num_reqs=self.max_num_reqs, max_model_len=self.max_model_len, max_num_blocks_per_req=self.max_num_blocks_per_req, device=self.device, @@ -100,7 +105,16 @@ def __init__( == CompilationLevel.PIECEWISE and not self.model_config.enforce_eager) # TODO(woosuk): Provide an option to tune the max cudagraph batch size. - self.cudagraph_batch_sizes = [1, 2, 4] + [i for i in range(8, 513, 8)] + # The convention is different. + # self.cudagraph_batch_sizes sorts in ascending order. + # The batch sizes in the config are in descending order. 
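+        # E.g., capture sizes [512, 256, ..., 2, 1] become [1, 2, ..., 512].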
+ self.cudagraph_batch_sizes = list( + reversed(self.vllm_config.compilation_config.capture_sizes)) + + # Persistent buffers for CUDA graphs. + self.input_ids = torch.zeros(self.max_num_tokens, + dtype=torch.int32, + device=self.device) self.positions = torch.zeros(self.max_num_tokens, dtype=torch.int64, device=self.device) @@ -109,6 +123,38 @@ def __init__( dtype=self.dtype, device=self.device) + # OPTIMIZATION: Cache the tensors rather than creating them every step. + self.arange_np = np.arange(max(self.max_num_reqs, self.max_model_len), + dtype=np.int32) + # NOTE(woosuk): These tensors are "stateless", i.e., they are literally + # a faster version of creating a new tensor every time. Thus, we should + # not make any assumptions about the values in these tensors. + self.input_ids_cpu = torch.zeros(self.max_num_tokens, + dtype=torch.int32, + device="cpu", + pin_memory=self.pin_memory) + self.input_ids_np = self.input_ids_cpu.numpy() + self.positions_cpu = torch.zeros(self.max_num_tokens, + dtype=torch.int64, + device="cpu", + pin_memory=self.pin_memory) + self.positions_np = self.positions_cpu.numpy() + self.slot_mapping_cpu = torch.zeros(self.max_num_tokens, + dtype=torch.int32, + device="cpu", + pin_memory=self.pin_memory) + self.slot_mapping_np = self.slot_mapping_cpu.numpy() + self.query_start_loc_cpu = torch.zeros(self.max_num_reqs + 1, + dtype=torch.int32, + device="cpu", + pin_memory=self.pin_memory) + self.query_start_loc_np = self.query_start_loc_cpu.numpy() + self.seq_start_loc_cpu = torch.zeros(self.max_num_reqs + 1, + dtype=torch.int32, + device="cpu", + pin_memory=self.pin_memory) + self.seq_start_loc_np = self.seq_start_loc_cpu.numpy() + def _update_states(self, scheduler_output: "SchedulerOutput") -> None: # Remove stopped requests from the cached states. # Keep the states of the pre-empted requests. @@ -158,9 +204,9 @@ def _update_states(self, scheduler_output: "SchedulerOutput") -> None: req_ids_to_add: List[str] = [] # Add new requests to the cached states. - for req_data in scheduler_output.scheduled_new_reqs: - req_id = req_data.req_id - sampling_params = req_data.sampling_params + for new_req_data in scheduler_output.scheduled_new_reqs: + req_id = new_req_data.req_id + sampling_params = new_req_data.sampling_params if sampling_params.sampling_type == SamplingType.RANDOM_SEED: generator = torch.Generator(device=self.device) generator.manual_seed(sampling_params.seed) @@ -169,25 +215,25 @@ def _update_states(self, scheduler_output: "SchedulerOutput") -> None: self.requests[req_id] = CachedRequestState( req_id=req_id, - prompt_token_ids=req_data.prompt_token_ids, - prompt=req_data.prompt, - mm_inputs=req_data.mm_inputs, - mm_positions=req_data.mm_positions, + prompt_token_ids=new_req_data.prompt_token_ids, + prompt=new_req_data.prompt, + mm_inputs=new_req_data.mm_inputs, + mm_positions=new_req_data.mm_positions, sampling_params=sampling_params, generator=generator, - block_ids=req_data.block_ids, - num_computed_tokens=req_data.num_computed_tokens, + block_ids=new_req_data.block_ids, + num_computed_tokens=new_req_data.num_computed_tokens, output_token_ids=[], ) req_ids_to_add.append(req_id) # Update the cached states of the resumed requests. 
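+        # Only the block ids and the computed-token count change for a
+        # resumed request; its token ids are still cached in self.requests.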
- for req_data in scheduler_output.scheduled_resumed_reqs: - req_id = req_data.req_id + for res_req_data in scheduler_output.scheduled_resumed_reqs: + req_id = res_req_data.req_id req_state = self.requests[req_id] - req_state.block_ids = req_data.block_ids - req_state.num_computed_tokens = req_data.num_computed_tokens + req_state.block_ids = res_req_data.block_ids + req_state.num_computed_tokens = res_req_data.num_computed_tokens req_ids_to_add.append(req_id) # Add the new or resumed requests to the persistent batch. @@ -224,6 +270,7 @@ def _prepare_inputs(self, scheduler_output: "SchedulerOutput"): num_scheduled_tokens = [] max_num_scheduled_tokens = 0 for req_id in self.input_batch.req_ids[:num_reqs]: + assert req_id is not None num_tokens = scheduler_output.num_scheduled_tokens[req_id] num_scheduled_tokens.append(num_tokens) max_num_scheduled_tokens = max(max_num_scheduled_tokens, @@ -233,22 +280,16 @@ def _prepare_inputs(self, scheduler_output: "SchedulerOutput"): # Get request indices. # E.g., [2, 5, 3] -> [0, 0, 1, 1, 1, 1, 1, 2, 2, 2] - indices = np.arange(num_reqs) - req_indices = np.repeat(indices, num_scheduled_tokens) + req_indices = np.repeat(self.arange_np[:num_reqs], + num_scheduled_tokens) # Get batched arange. # E.g., [2, 5, 3] -> [0, 1, 0, 1, 2, 3, 4, 0, 1, 2] - arange_matrix = np.tile(np.arange(max_num_scheduled_tokens), - (num_reqs, 1)) - mask = arange_matrix < num_scheduled_tokens[:, np.newaxis] - arange = arange_matrix[mask] + arange = np.concatenate( + [self.arange_np[:n] for n in num_scheduled_tokens]) # Get positions. - positions = torch.empty((total_num_scheduled_tokens, ), - dtype=torch.int32, - device="cpu", - pin_memory=self.pin_memory) - positions_np = positions.numpy() + positions_np = self.positions_np[:total_num_scheduled_tokens] np.add(self.input_batch.num_computed_tokens_cpu[req_indices], arange, out=positions_np) @@ -257,56 +298,56 @@ def _prepare_inputs(self, scheduler_output: "SchedulerOutput"): # E.g., [0, 1, 0, 1, 2, 3, 4, 0, 1, 2] # -> [0, 1, M, M + 1, M + 2, M + 3, M + 4, 2 * M, 2 * M + 1, 2 * M + 2] # where M is the max_model_len. - token_indices = positions_np + req_indices * self.max_model_len - token_indices = torch.from_numpy(token_indices) - input_ids = torch.empty((total_num_scheduled_tokens, ), - dtype=torch.int32, - device="cpu", - pin_memory=self.pin_memory) - torch.index_select(torch.from_numpy( - self.input_batch.token_ids_cpu).flatten(), + token_indices = (positions_np + + req_indices * self.input_batch.token_ids_cpu.shape[1]) + # NOTE(woosuk): We use torch.index_select instead of np.take here + # because torch.index_select is much faster than np.take for large + # tensors. + torch.index_select(self.input_batch.token_ids_cpu_tensor.flatten(), 0, - token_indices, - out=input_ids) + torch.from_numpy(token_indices), + out=self.input_ids_cpu[:total_num_scheduled_tokens]) # Calculate the slot mapping. - block_numbers = self.input_batch.block_table_cpu_tensor.flatten()[ - token_indices // self.block_size] - block_offsets = token_indices % self.block_size - slot_mapping = torch.empty((total_num_scheduled_tokens, ), - dtype=torch.int32, - device="cpu", - pin_memory=self.pin_memory) - torch.add(block_numbers * self.block_size, - block_offsets, - out=slot_mapping) + # E.g., [0, 1, 0, 1, 2, 3, 4, 0, 1, 2] + # -> [0, 0, K, K, K + 1, K + 1, K + 2, 2 * K, 2 * K, 2 * K + 1] + # where K is the max_num_blocks_per_req and the block size is 2. 
+ # NOTE(woosuk): We can't simply use `token_indices // block_size` here + # because M (max_model_len) is not necessarily divisible by block_size. + block_table_indices = (req_indices * self.max_num_blocks_per_req + + positions_np // self.block_size) + # NOTE(woosuk): We use torch.index_select instead of np.take here + # because torch.index_select is much faster than np.take for large + # tensors. + block_numbers = (self.input_batch.block_table_cpu_tensor.flatten() + [block_table_indices].numpy()) + block_offsets = positions_np % self.block_size + np.add(block_numbers * self.block_size, + block_offsets, + out=self.slot_mapping_np[:total_num_scheduled_tokens]) # Prepare the attention metadata. - query_start_loc = torch.empty((num_reqs + 1, ), - dtype=torch.int32, - device="cpu", - pin_memory=self.pin_memory) - query_start_loc_np = query_start_loc.numpy() - query_start_loc_np[0] = 0 - np.cumsum(num_scheduled_tokens, out=query_start_loc_np[1:]) + self.query_start_loc_np[0] = 0 + np.cumsum(num_scheduled_tokens, + out=self.query_start_loc_np[1:num_reqs + 1]) seq_lens = (self.input_batch.num_computed_tokens_cpu[:num_reqs] + num_scheduled_tokens) max_seq_len = seq_lens.max() - seq_start_loc = torch.empty((num_reqs + 1, ), - dtype=torch.int32, - device="cpu", - pin_memory=self.pin_memory) - seq_start_loc_np = seq_start_loc.numpy() - seq_start_loc_np[0] = 0 - np.cumsum(seq_lens, out=seq_start_loc_np[1:]) - - input_ids = input_ids.to(self.device, non_blocking=True) - self.positions[:total_num_scheduled_tokens].copy_(positions, - non_blocking=True) - query_start_loc = query_start_loc.to(self.device, non_blocking=True) - seq_start_loc = seq_start_loc.to(self.device, non_blocking=True) - slot_mapping = slot_mapping.to(self.device, non_blocking=True).long() + self.seq_start_loc_np[0] = 0 + np.cumsum(seq_lens, out=self.seq_start_loc_np[1:num_reqs + 1]) + + # Copy the tensors to the GPU. + self.input_ids[:total_num_scheduled_tokens].copy_( + self.input_ids_cpu[:total_num_scheduled_tokens], non_blocking=True) + self.positions[:total_num_scheduled_tokens].copy_( + self.positions_cpu[:total_num_scheduled_tokens], non_blocking=True) + query_start_loc = self.query_start_loc_cpu[:num_reqs + 1].to( + self.device, non_blocking=True) + seq_start_loc = self.seq_start_loc_cpu[:num_reqs + 1].to( + self.device, non_blocking=True) + slot_mapping = self.slot_mapping_cpu[:total_num_scheduled_tokens].to( + self.device, non_blocking=True).long() attn_metadata = FlashAttentionMetadata( num_actual_tokens=total_num_scheduled_tokens, max_query_len=max_num_scheduled_tokens, @@ -322,7 +363,7 @@ def _prepare_inputs(self, scheduler_output: "SchedulerOutput"): # token from the partial request. # TODO: Support prompt logprobs. logits_indices = query_start_loc[1:] - 1 - return input_ids, attn_metadata, logits_indices + return attn_metadata, logits_indices def _prepare_sampling( self, @@ -346,7 +387,7 @@ def _execute_encoder(self, scheduler_output: "SchedulerOutput"): # Batch the multi-modal inputs. 
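+        # Encoder inputs from all scheduled requests are flattened into one
+        # list, keyed by (req_id, input_id), so each encoder output can be
+        # routed back to its originating request.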
mm_inputs: List[MultiModalKwargs] = [] - req_input_ids: List[Tuple[int, int]] = [] + req_input_ids: List[Tuple[str, int]] = [] for req_id, encoder_input_ids in scheduled_encoder_inputs.items(): req_state = self.requests[req_id] for input_id in encoder_input_ids: @@ -379,6 +420,7 @@ def _gather_encoder_outputs( encoder_outputs: List[torch.Tensor] = [] num_reqs = self.input_batch.num_reqs for req_id in self.input_batch.req_ids[:num_reqs]: + assert req_id is not None num_scheduled_tokens = scheduler_output.num_scheduled_tokens[ req_id] req_state = self.requests[req_id] @@ -418,44 +460,58 @@ def execute_model( ) -> ModelRunnerOutput: self._update_states(scheduler_output) - # Run the encoder. - self._execute_encoder(scheduler_output) - encoder_outputs = self._gather_encoder_outputs(scheduler_output) + if self.is_multimodal_model: + # Run the multimodal encoder if any. + self._execute_encoder(scheduler_output) + encoder_outputs = self._gather_encoder_outputs(scheduler_output) + else: + encoder_outputs = [] # Prepare the decoder inputs. - input_ids, attn_metadata, logits_indices = self._prepare_inputs( - scheduler_output) + attn_metadata, logits_indices = self._prepare_inputs(scheduler_output) num_scheduled_tokens = scheduler_output.total_num_scheduled_tokens if (self.use_cuda_graph and num_scheduled_tokens <= self.cudagraph_batch_sizes[-1]): # Use piecewise CUDA graphs. # Add padding to the batch size. - num_input_tokens = self._get_padded_batch_size( + num_input_tokens = self.vllm_config.pad_for_cudagraph( num_scheduled_tokens) else: # Eager mode. num_input_tokens = num_scheduled_tokens - - # Get the inputs embeds. - if encoder_outputs: - inputs_embeds = self.model.get_input_embeddings( - input_ids, encoder_outputs) + attn_metadata.num_input_tokens = num_input_tokens + + if self.is_multimodal_model: + # NOTE(woosuk): To unify token ids and soft tokens (vision + # embeddings), we always use embeddings (rather than token ids) + # as input to the multimodal model, even when the input is text. + input_ids = self.input_ids[:num_scheduled_tokens] + if encoder_outputs: + inputs_embeds = self.model.get_input_embeddings( + input_ids, encoder_outputs) + else: + inputs_embeds = self.model.get_input_embeddings(input_ids) + # TODO(woosuk): Avoid the copy. Optimize. + self.inputs_embeds[:num_scheduled_tokens].copy_(inputs_embeds) + inputs_embeds = self.inputs_embeds[:num_input_tokens] + input_ids = None else: - inputs_embeds = self.model.get_input_embeddings(input_ids) - # NOTE(woosuk): To unify token ids and soft tokens (vision embeddings), - # always use embeddings (rather than token ids) as input to the model. - # TODO(woosuk): Avoid the copy. Optimize. - self.inputs_embeds[:num_scheduled_tokens].copy_(inputs_embeds) + # For text-only models, we use token ids as input. + # While it is possible to use embeddings as input just like the + # multimodal models, it is not desirable for performance since + # then the embedding layer is not included in the CUDA graph. + input_ids = self.input_ids[:num_input_tokens] + inputs_embeds = None # Run the decoder. # Use persistent buffers for CUDA graphs. 
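+        # The slices below come from preallocated buffers, so CUDA graph
+        # replay always sees the same memory addresses.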
with set_forward_context(attn_metadata, self.vllm_config):
             hidden_states = self.model(
-                input_ids=None,
+                input_ids=input_ids,
                 positions=self.positions[:num_input_tokens],
                 kv_caches=self.kv_caches,
                 attn_metadata=None,
-                inputs_embeds=self.inputs_embeds[:num_input_tokens],
+                inputs_embeds=inputs_embeds,
             )
         hidden_states = hidden_states[:num_scheduled_tokens]
         hidden_states = hidden_states[logits_indices]
@@ -468,20 +524,19 @@ def execute_model(
             sampling_metadata=sampling_metadata,
         )
 
-        # NOTE: CPU-GPU synchronization happens here.
-        sampled_token_ids = sampler_output.sampled_token_ids.cpu()
-        sampled_token_ids_list = sampled_token_ids.tolist()
+        sampled_token_ids = sampler_output.sampled_token_ids
         # TODO(woosuk): The following loop can be slow since it iterates over
         # the requests one by one. Optimize.
         num_reqs = self.input_batch.num_reqs
         for i, req_id in enumerate(self.input_batch.req_ids[:num_reqs]):
+            assert req_id is not None
             req_state = self.requests[req_id]
             seq_len = (req_state.num_computed_tokens +
                        scheduler_output.num_scheduled_tokens[req_id])
             assert seq_len <= req_state.num_tokens
             if seq_len == req_state.num_tokens:
                 # Append the sampled token to the output token ids.
-                token_id = sampled_token_ids_list[i]
+                token_id = sampled_token_ids[i]
                 self.input_batch.token_ids_cpu[i, seq_len] = token_id
                 req_state.output_token_ids.append(token_id)
             else:
@@ -500,10 +555,17 @@ def execute_model(
             logprobs = None
         else:
             logprobs = sampler_output.logprobs.cpu()
+
+        # num_reqs entries should be non-None
+        assert all(
+            req_id is not None for req_id in
+            self.input_batch.req_ids[:num_reqs]), "req_ids contains None"
+        req_ids = cast(List[str], self.input_batch.req_ids[:num_reqs])
+
         model_runner_output = ModelRunnerOutput(
-            req_ids=self.input_batch.req_ids[:num_reqs],
+            req_ids=req_ids,
             req_id_to_index=self.input_batch.req_id_to_index,
-            sampled_token_ids_cpu=sampled_token_ids,
+            sampled_token_ids=sampled_token_ids,
             logprob_token_ids_cpu=logprob_token_ids,
             logprobs_cpu=logprobs,
         )
@@ -525,18 +587,23 @@ def _dummy_run(
         num_tokens: int,
         kv_caches: List[torch.Tensor],
     ) -> torch.Tensor:
+        if self.is_multimodal_model:
+            input_ids = None
+            inputs_embeds = self.inputs_embeds[:num_tokens]
+        else:
+            input_ids = self.input_ids[:num_tokens]
+            inputs_embeds = None
         with set_forward_context(None, self.vllm_config):
             hidden_states = model(
-                input_ids=None,
+                input_ids=input_ids,
                 positions=self.positions[:num_tokens],
                 kv_caches=kv_caches,
                 attn_metadata=None,
-                inputs_embeds=self.inputs_embeds[:num_tokens])
+                inputs_embeds=inputs_embeds,
+            )
         return hidden_states
 
     def profile_run(self) -> None:
-        # TODO(woosuk): Profile the max memory usage of the encoder and
-        # the encoder cache.
         # use an empty tensor instead of `None` to force Dynamo to pass
         # it by reference, rather than by specializing on the value `None`.
         # the `dtype` argument does not matter, and we use `float32` as
@@ -548,15 +615,66 @@ def profile_run(self) -> None:
             torch.tensor([], dtype=torch.float32, device=self.device)
             for _ in range(self.num_attn_layers)
         ]
-        with set_compile_context(self.cudagraph_batch_sizes):
-            # Trigger compilation for general shape.
-            hidden_states = self._dummy_run(self.model, self.max_num_tokens,
-                                            dummy_kv_caches)
+
+        # Profile with multimodal encoder & encoder cache.
+        # TODO (ywang96): generalize this beyond image modality since
+        # mm_input_mapper only supports image inputs.
+        if self.is_multimodal_model:
+
+            # Create dummy batch of multimodal inputs.
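+            # The dummy batch emulates the worst case: the largest
+            # multimodal item repeated as many times as the encoder
+            # cache can hold.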
+            dummy_request_data = self.input_registry.dummy_data_for_profiling(
+                model_config=self.model_config,
+                seq_len=self.max_num_tokens,
+                mm_registry=self.mm_registry,
+            )
+            dummy_mm_data = dummy_request_data.multi_modal_data
+            dummy_mm_kwargs, _ = self.mm_input_mapper.process_inputs(
+                mm_data=dummy_mm_data,
+                mm_hashes=None,
+                mm_processor_kwargs=None,
+                precomputed_mm_inputs=None)
+
+            # NOTE: Currently the model is profiled with a single non-text
+            # modality even when it supports multiple.
+            max_tokens_per_mm_item = max(
+                self.mm_registry.get_max_tokens_per_item_by_modality(
+                    self.model_config).values())
+
+            max_num_mm_items = min(
+                self.max_num_encoder_input_tokens,
+                self.encoder_cache_size) // max_tokens_per_mm_item
+
+            # A dummy data definition in V0 may contain multiple multimodal
+            # items (e.g., multiple images) for a single request; therefore
+            # we replicate the first item max_num_mm_items times, since in V1
+            # multimodal items are scheduled to be processed separately.
+            batched_dummy_mm_inputs = MultiModalKwargs.batch(
+                [dummy_mm_kwargs[0]] * max_num_mm_items)
+            batched_dummy_mm_inputs = MultiModalKwargs.as_kwargs(
+                batched_dummy_mm_inputs, device=self.device)
+
+            # Run multimodal encoder.
+            dummy_encoder_outputs = self.model.get_multimodal_embeddings(
+                **batched_dummy_mm_inputs)
+            assert len(dummy_encoder_outputs) == max_num_mm_items, (
+                "Expected dimension 0 of encoder outputs to match the number "
+                f"of multimodal data items: {max_num_mm_items}, got "
+                f"{len(dummy_encoder_outputs)=} instead. This is most likely "
+                "due to the 'get_multimodal_embeddings' method of the model "
+                "not being implemented correctly.")
+
+            # Cache the dummy encoder outputs.
+            self.encoder_cache["tmp"] = dict(enumerate(dummy_encoder_outputs))
+
+        # Trigger compilation for general shape.
+        hidden_states = self._dummy_run(self.model, self.max_num_tokens,
+                                        dummy_kv_caches)
         logits = self.model.compute_logits(hidden_states, None)
         logits = logits[:self.max_num_tokens]
         # TODO(woosuk): Consider the memory usage of the sampler.
         torch.cuda.synchronize()
         del hidden_states, logits
+        self.encoder_cache.clear()
         gc.collect()
 
     def capture_model(self) -> None:
@@ -574,6 +692,9 @@ def capture_model(self) -> None:
         # can reuse the memory pool allocated for the large shapes.
         with graph_capture():
             for num_tokens in reversed(self.cudagraph_batch_sizes):
+                for _ in range(self.vllm_config.compilation_config.
+                               cudagraph_num_of_warmups):
+                    self._dummy_run(self.model, num_tokens, self.kv_caches)
                 self._dummy_run(self.model, num_tokens, self.kv_caches)
 
         end_time = time.perf_counter()
@@ -593,276 +714,3 @@ def initialize_kv_cache(self, num_blocks: int) -> None:
                 torch.zeros(kv_cache_shape,
                             dtype=self.kv_cache_dtype,
                             device=self.device))
-
-    def _get_padded_batch_size(self, batch_size: int) -> Optional[int]:
-        # TODO: Optimize this?
- for size in self.cudagraph_batch_sizes: - if batch_size <= size: - return size - return None - - -@dataclass -class CachedRequestState: - - req_id: str - prompt_token_ids: List[int] - prompt: Optional[str] - mm_inputs: List[MultiModalKwargs] - mm_positions: List["PlaceholderRange"] - sampling_params: SamplingParams - generator: Optional[torch.Generator] - - block_ids: List[int] - num_computed_tokens: int - output_token_ids: List[int] - - @property - def num_tokens(self) -> int: - return len(self.prompt_token_ids) + len(self.output_token_ids) - - -class InputBatch: - - def __init__( - self, - max_num_reqs: int, - max_model_len: int, - max_num_blocks_per_req: int, - device: torch.device, - pin_memory: bool, - ): - self.max_num_reqs = max_num_reqs - self.max_model_len = max_model_len - self.max_num_blocks_per_req = max_num_blocks_per_req - self.device = device - self.pin_memory = pin_memory - - self.req_ids: List[Optional[str]] = [None] * max_num_reqs - self.req_id_to_index: Dict[str, int] = {} - - self.token_ids_cpu = np.empty((max_num_reqs, max_model_len), - dtype=np.int32) - self.num_computed_tokens_cpu = np.empty(max_num_reqs, dtype=np.int32) - - # Attention-related. - self.block_table = torch.zeros((max_num_reqs, max_num_blocks_per_req), - device=self.device, - dtype=torch.int32) - self.block_table_cpu_tensor = torch.zeros( - (max_num_reqs, max_num_blocks_per_req), - device="cpu", - dtype=torch.int32, - pin_memory=pin_memory, - ) - self.block_table_cpu = self.block_table_cpu_tensor.numpy() - - # Sampling-related. - self.temperature = torch.empty((max_num_reqs, ), - dtype=torch.float32, - device=device) - self.temperature_cpu_tensor = torch.empty((max_num_reqs, ), - dtype=torch.float32, - device="cpu", - pin_memory=pin_memory) - self.temperature_cpu = self.temperature_cpu_tensor.numpy() - self.greedy_reqs: Set[str] = set() - self.random_reqs: Set[str] = set() - - self.top_p = torch.empty((max_num_reqs, ), - dtype=torch.float32, - device=device) - self.top_p_cpu_tensor = torch.empty((max_num_reqs, ), - dtype=torch.float32, - device="cpu", - pin_memory=pin_memory) - self.top_p_cpu = self.top_p_cpu_tensor.numpy() - self.top_p_reqs: Set[str] = set() - - self.top_k = torch.empty((max_num_reqs, ), - dtype=torch.int32, - device=device) - self.top_k_cpu_tensor = torch.empty((max_num_reqs, ), - dtype=torch.int32, - device="cpu", - pin_memory=pin_memory) - self.top_k_cpu = self.top_k_cpu_tensor.numpy() - self.top_k_reqs: Set[str] = set() - - # req_index -> generator - self.generators: Dict[int, torch.Generator] = {} - - self.num_logprobs: Dict[str, int] = {} - self.prompt_logprob_reqs: Set[str] = set() - - def add_request( - self, - request: "CachedRequestState", - req_index: Optional[int] = None, - ) -> None: - if req_index is None: - req_index = self.num_reqs - assert req_index < self.max_num_reqs - - req_id = request.req_id - self.req_ids[req_index] = req_id - self.req_id_to_index[req_id] = req_index - - # Copy the prompt token ids and output token ids. 
- num_prompt_tokens = len(request.prompt_token_ids) - self.token_ids_cpu[ - req_index, :num_prompt_tokens] = request.prompt_token_ids - start_idx = num_prompt_tokens - end_idx = start_idx + len(request.output_token_ids) - self.token_ids_cpu[req_index, - start_idx:end_idx] = request.output_token_ids - - self.num_computed_tokens_cpu[req_index] = request.num_computed_tokens - num_blocks = len(request.block_ids) - self.block_table_cpu[req_index, :num_blocks] = request.block_ids - - sampling_params = request.sampling_params - self.temperature_cpu[req_index] = sampling_params.temperature - if sampling_params.sampling_type == SamplingType.GREEDY: - self.greedy_reqs.add(req_id) - else: - self.random_reqs.add(req_id) - - self.top_p_cpu[req_index] = sampling_params.top_p - if sampling_params.top_p < 1: - self.top_p_reqs.add(req_id) - self.top_k_cpu[req_index] = sampling_params.top_k - if sampling_params.top_k > 0: - self.top_k_reqs.add(req_id) - - self.generators[req_index] = request.generator - - num_logprobs = sampling_params.logprobs - if num_logprobs is not None and num_logprobs > 0: - self.num_logprobs[req_id] = num_logprobs - if sampling_params.prompt_logprobs: - self.prompt_logprob_reqs.add(req_id) - - def remove_request(self, req_id: str) -> Optional[int]: - req_index = self.req_id_to_index.pop(req_id, None) - if req_index is None: - return None - self.req_ids[req_index] = None - - self.greedy_reqs.discard(req_id) - self.random_reqs.discard(req_id) - self.top_p_reqs.discard(req_id) - self.top_k_reqs.discard(req_id) - self.generators.pop(req_index, None) - self.num_logprobs.pop(req_id, None) - self.prompt_logprob_reqs.discard(req_id) - return req_index - - def clear(self) -> None: - self.req_ids = [None] * self.max_num_reqs - self.req_id_to_index.clear() - self.greedy_reqs.clear() - self.random_reqs.clear() - self.top_p_reqs.clear() - self.top_k_reqs.clear() - self.generators.clear() - self.num_logprobs.clear() - self.prompt_logprob_reqs.clear() - - def condense(self, empty_req_indices: List[int]) -> None: - if self.num_reqs == 0: - # The batched states are empty. - return - - # NOTE(woosuk): This function assumes that the empty_req_indices - # is sorted in descending order. - last_req_index = self.num_reqs + len(empty_req_indices) - 1 - while empty_req_indices: - # Find the largest non-empty index. - while last_req_index in empty_req_indices: - last_req_index -= 1 - - # Find the smallest empty index. - empty_index = empty_req_indices.pop() - if empty_index >= last_req_index: - break - - # Swap the states. - req_id = self.req_ids[last_req_index] - self.req_ids[empty_index] = req_id - self.req_ids[last_req_index] = None - self.req_id_to_index[req_id] = empty_index - - # TODO(woosuk): Optimize the copy of token_ids_cpu and - # block_table_cpu. - self.token_ids_cpu[empty_index] = self.token_ids_cpu[ - last_req_index] - self.num_computed_tokens_cpu[ - empty_index] = self.num_computed_tokens_cpu[last_req_index] - self.block_table_cpu[empty_index] = self.block_table_cpu[ - last_req_index] - self.temperature_cpu[empty_index] = self.temperature_cpu[ - last_req_index] - self.top_p_cpu[empty_index] = self.top_p_cpu[last_req_index] - self.top_k_cpu[empty_index] = self.top_k_cpu[last_req_index] - generator = self.generators.pop(last_req_index, None) - if generator is not None: - self.generators[empty_index] = generator - - # Decrement last_req_index since it is now empty. 
- last_req_index -= 1 - - def make_sampling_metadata( - self, - skip_copy: bool = False, - ) -> SamplingMetadata: - if not skip_copy: - self.temperature[:self.num_reqs].copy_( - self.temperature_cpu_tensor[:self.num_reqs], non_blocking=True) - self.top_p[:self.num_reqs].copy_( - self.top_p_cpu_tensor[:self.num_reqs], non_blocking=True) - self.top_k[:self.num_reqs].copy_( - self.top_k_cpu_tensor[:self.num_reqs], non_blocking=True) - return SamplingMetadata( - temperature=self.temperature[:self.num_reqs], - all_greedy=self.all_greedy, - all_random=self.all_random, - top_p=self.top_p[:self.num_reqs], - top_k=self.top_k[:self.num_reqs], - no_top_p=self.no_top_p, - no_top_k=self.no_top_k, - generators=self.generators, - max_num_logprobs=self.max_num_logprobs, - ) - - @property - def num_reqs(self) -> int: - return len(self.req_id_to_index) - - @property - def all_greedy(self) -> bool: - return len(self.random_reqs) == 0 - - @property - def all_random(self) -> bool: - return len(self.greedy_reqs) == 0 - - @property - def no_top_p(self) -> bool: - return len(self.top_p_reqs) == 0 - - @property - def no_top_k(self) -> bool: - return len(self.top_k_reqs) == 0 - - @property - def max_num_logprobs(self) -> int: - return max(self.num_logprobs.values()) if self.num_logprobs else 0 - - @property - def no_logprob(self) -> bool: - return len(self.num_logprobs) == 0 - - @property - def no_prompt_logprob(self) -> bool: - return len(self.prompt_logprob_reqs) == 0 diff --git a/vllm/v1/worker/gpu_worker.py b/vllm/v1/worker/gpu_worker.py index d33b55a8a9f9a..33491f700de10 100644 --- a/vllm/v1/worker/gpu_worker.py +++ b/vllm/v1/worker/gpu_worker.py @@ -14,7 +14,8 @@ from vllm.logger import init_logger from vllm.model_executor import set_random_seed from vllm.platforms import current_platform -from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE, get_dtype_size +from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE, LayerBlockType, get_dtype_size +from vllm.v1.core.scheduler import SchedulerOutput from vllm.v1.outputs import ModelRunnerOutput from vllm.v1.worker.gpu_model_runner import GPUModelRunner @@ -56,7 +57,6 @@ def __init__( from vllm.utils import init_cached_hf_modules init_cached_hf_modules() - self.model_runner = GPUModelRunner(vllm_config) # Torch profiler. Enabled and configured through env vars: # VLLM_TORCH_PROFILER_DIR=/path/to/save/trace if envs.VLLM_TORCH_PROFILER_DIR: @@ -103,6 +103,9 @@ def initialize(self): # Set random seed. set_random_seed(self.model_config.seed) + # Construct the model runner + self.model_runner = GPUModelRunner(self.vllm_config, self.device) + def load_model(self) -> None: self.model_runner.load_model() @@ -198,10 +201,10 @@ def execute_model( scheduler_output: "SchedulerOutput", ) -> ModelRunnerOutput: output = self.model_runner.execute_model(scheduler_output) - # TODO(woosuk): Send the output to the engine process. + return output if self.rank == 0 else None return output - def profile(self, is_start=True): + def profile(self, is_start: bool = True): if self.profiler is None: raise RuntimeError("Profiler is not enabled.") if is_start: @@ -209,6 +212,10 @@ def profile(self, is_start=True): else: self.profiler.stop() + def check_health(self) -> None: + # worker will always be healthy as long as it's running. 
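+        # (Same contract as UniprocExecutor.check_health.)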
+        return
+
 
 def init_worker_distributed_environment(
     parallel_config: ParallelConfig,
@@ -253,8 +260,8 @@ def _get_cache_block_size(
 ) -> int:
     head_size = model_config.get_head_size()
     num_heads = model_config.get_num_kv_heads(parallel_config)
-    num_attention_layers = model_config.get_num_attention_layers(
-        parallel_config)
+    num_attention_layers = model_config.get_num_layers_by_block_type(
+        parallel_config, LayerBlockType.attention)
 
     key_cache_block = cache_config.block_size * num_heads * head_size
     value_cache_block = key_cache_block
diff --git a/vllm/worker/cache_engine.py b/vllm/worker/cache_engine.py
index ac3270d1c9909..7ccd4571b19df 100644
--- a/vllm/worker/cache_engine.py
+++ b/vllm/worker/cache_engine.py
@@ -6,8 +6,8 @@
 from vllm.attention import get_attn_backend
 from vllm.config import CacheConfig, DeviceConfig, ModelConfig, ParallelConfig
 from vllm.logger import init_logger
-from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, get_dtype_size,
-                        is_pin_memory_available)
+from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, LayerBlockType,
+                        get_dtype_size, is_pin_memory_available)
 
 logger = init_logger(__name__)
 
@@ -34,8 +34,8 @@ def __init__(
         self.head_size = model_config.get_head_size()
 
         # Models like Jamba have mixed-type layers, e.g., Mamba.
-        self.num_attention_layers = model_config.get_num_attention_layers(
-            parallel_config)
+        self.num_attention_layers = model_config.get_num_layers_by_block_type(
+            parallel_config, LayerBlockType.attention)
         self.num_kv_heads = model_config.get_num_kv_heads(parallel_config)
 
         self.block_size = cache_config.block_size
@@ -105,8 +105,8 @@ def get_cache_block_size(
     ) -> int:
         head_size = model_config.get_head_size()
         num_heads = model_config.get_num_kv_heads(parallel_config)
-        num_attention_layers = model_config.get_num_attention_layers(
-            parallel_config)
+        num_attention_layers = model_config.get_num_layers_by_block_type(
+            parallel_config, LayerBlockType.attention)
 
         key_cache_block = cache_config.block_size * num_heads * head_size
         value_cache_block = key_cache_block
diff --git a/vllm/worker/cpu_embedding_model_runner.py b/vllm/worker/cpu_pooling_model_runner.py
similarity index 98%
rename from vllm/worker/cpu_embedding_model_runner.py
rename to vllm/worker/cpu_pooling_model_runner.py
index 3954e4c4c8a5b..17b2fd2564a04 100644
--- a/vllm/worker/cpu_embedding_model_runner.py
+++ b/vllm/worker/cpu_pooling_model_runner.py
@@ -16,12 +16,12 @@
 @dataclasses.dataclass(frozen=True)
 class ModelInputForCPUWithPoolingMetadata(ModelInputForCPU):
     """
-    Used by the CPUEmbeddingModelRunner.
+    Used by the CPUPoolingModelRunner.
""" pooling_metadata: Optional["PoolingMetadata"] = None -class CPUEmbeddingModelRunner( +class CPUPoolingModelRunner( CPUModelRunnerBase[ModelInputForCPUWithPoolingMetadata]): _model_input_cls: Type[ModelInputForCPUWithPoolingMetadata] = ( ModelInputForCPUWithPoolingMetadata) diff --git a/vllm/worker/cpu_worker.py b/vllm/worker/cpu_worker.py index cf04808b73372..09758a5d9accf 100644 --- a/vllm/worker/cpu_worker.py +++ b/vllm/worker/cpu_worker.py @@ -14,9 +14,9 @@ from vllm.model_executor import set_random_seed from vllm.sequence import ExecuteModelRequest from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE -from vllm.worker.cpu_embedding_model_runner import CPUEmbeddingModelRunner from vllm.worker.cpu_enc_dec_model_runner import CPUEncoderDecoderModelRunner from vllm.worker.cpu_model_runner import CPUModelRunner, CPUModelRunnerBase +from vllm.worker.cpu_pooling_model_runner import CPUPoolingModelRunner from vllm.worker.worker_base import (LocalOrDistributedWorkerBase, LoraNotSupportedWorkerBase, WorkerBase, WorkerInput) @@ -163,8 +163,8 @@ def __init__( not in ["medusa", "mlp_speculator", "eagle"]) \ else {"return_hidden_states": True} ModelRunnerClass: Type[CPUModelRunnerBase] = CPUModelRunner - if self.model_config.task == "embedding": - ModelRunnerClass = CPUEmbeddingModelRunner + if self.model_config.runner_type == "pooling": + ModelRunnerClass = CPUPoolingModelRunner elif self.model_config.is_encoder_decoder: ModelRunnerClass = CPUEncoderDecoderModelRunner self.model_runner: CPUModelRunnerBase = ModelRunnerClass( @@ -178,7 +178,7 @@ def __init__( # Uninitialized cache engine. Will be initialized by # initialize_cache. self.cache_engine: List[CPUCacheEngine] - # Initialize cpu_cache as embedding models don't initialize kv_caches + # Initialize cpu_cache as pooling models don't initialize kv_caches self.cpu_cache: Optional[List[List[torch.Tensor]]] = None # Torch profiler. Enabled and configured through env vars: diff --git a/vllm/worker/enc_dec_model_runner.py b/vllm/worker/enc_dec_model_runner.py index ae18c79c980c8..bff01320d7927 100644 --- a/vllm/worker/enc_dec_model_runner.py +++ b/vllm/worker/enc_dec_model_runner.py @@ -25,8 +25,7 @@ from vllm.utils import STR_NOT_IMPL_ENC_DEC_BACKEND, make_tensor_with_pad from vllm.worker.model_runner import (GPUModelRunnerBase, ModelInputForGPUBuilder, - ModelInputForGPUWithSamplingMetadata, - _get_graph_batch_size) + ModelInputForGPUWithSamplingMetadata) from vllm.worker.model_runner_base import ( _add_attn_metadata_broadcastable_dict, _add_sampling_metadata_broadcastable_dict) @@ -465,7 +464,8 @@ def _prepare_encoder_model_input_tensors( # We will be using CUDA graph replay for this decode. max_len_of_block_table = self.get_max_block_per_batch() batch_size = len(encoder_seq_lens) - graph_batch_size = _get_graph_batch_size(batch_size) + graph_batch_size = self.vllm_config.pad_for_cudagraph( + batch_size) assert graph_batch_size >= batch_size cuda_graph_pad_size = graph_batch_size - batch_size # extend the cross_block_tables and encoder_seq_lens to match diff --git a/vllm/worker/hpu_model_runner.py b/vllm/worker/hpu_model_runner.py index 99cf9a7e67256..9d479f412af46 100644 --- a/vllm/worker/hpu_model_runner.py +++ b/vllm/worker/hpu_model_runner.py @@ -622,6 +622,10 @@ def load_model(self) -> None: assert hasattr( self.model, "embedding_padding_modules" ), "Model does not have embedding_padding_modules" + assert not self.lora_config.bias_enabled, \ + "Bias support in LoRA is not enabled in HPU yet." 
+        assert not self.lora_config.fully_sharded_loras, \
+            "Fully sharded LoRAs are not enabled in HPU yet."
         self.lora_manager = LRUCacheWorkerLoRAManager(
             self.scheduler_config.max_num_seqs,
             self.scheduler_config.max_num_batched_tokens,
@@ -1282,11 +1286,9 @@ def create_dummy_seq_group_metadata(self,
 
     def profile_run(self) -> None:
         num_layers = self.model_config.get_num_layers(self.parallel_config)
         kv_caches = [None] * num_layers
-        max_batch_size = self.bucketing_global_state.prompt_bs_bucket_cfg[-1]
-        max_seq_len = min(
-            self.bucketing_global_state.prompt_seq_bucket_cfg[-1],
-            self.max_num_batched_tokens // max_batch_size)
-
+        max_seq_len = self.bucketing_global_state.prompt_seq_bucket_cfg[-1]
+        max_batch_size = min(self.max_num_batched_tokens // max_seq_len,
+                             self.scheduler_config.max_num_seqs)
         self.warmup_scenario(max_batch_size, max_seq_len, True, kv_caches,
                              False, True)
         return
@@ -1304,7 +1306,6 @@ def warmup_scenario(self,
                          f"bs{batch_size}_"
                          f"seq{seq_len}_"
                          f"graphs{'T' if use_graphs else 'F'}")
-        max_num_seqs = self.scheduler_config.max_num_seqs
         # This represents the maximum number of different requests
         # that will have unique loras, and therefore the max amount of memory
        # consumption. Create dummy lora request copies from the lora request
         # passed in, which contains a lora from the lora warmup path.
@@ -1326,16 +1327,10 @@ def warmup_scenario(self,
                 dummy_lora_requests.append(dummy_lora_request)
             dummy_lora_requests_per_seq = [
                 dummy_lora_requests[idx % len(dummy_lora_requests)]
-                for idx in range(max_num_seqs)
+                for idx in range(batch_size)
             ]
         self.profiler.start('internal', scenario_name)
         times = 3 if use_graphs or is_pt_profiler_run else 1
-        if self.lora_config and not is_lora_profile_run:
-            lora_mapping = LoRAMapping(
-                **dict(index_mapping=[0] * batch_size * seq_len,
-                       prompt_mapping=[0] * batch_size * seq_len,
-                       is_prefill=is_prompt))
-            self.set_active_loras(set(), lora_mapping)
         if is_prompt:
             seqs = [
                 self.create_dummy_seq_group_metadata(
diff --git a/vllm/worker/hpu_worker.py b/vllm/worker/hpu_worker.py
index 493f7a9fad098..cca7cd50bfc7b 100644
--- a/vllm/worker/hpu_worker.py
+++ b/vllm/worker/hpu_worker.py
@@ -65,8 +65,8 @@ def __init__(
         # Uninitialized cache engine. Will be initialized by
         # initialize_cache.
         self.cache_engine: List[HPUCacheEngine]
-        # Initialize gpu_cache as embedding models don't initialize kv_caches
-        self.hpu_cache: Optional[List[List[torch.tensor]]] = None
+        # Initialize gpu_cache as pooling models don't initialize kv_caches
+        self.hpu_cache: Optional[List[List[torch.Tensor]]] = None
 
         # Torch profiler.
Enabled and configured through env vars: # VLLM_TORCH_PROFILER_DIR=/path/to/save/trace if envs.VLLM_TORCH_PROFILER_DIR: diff --git a/vllm/worker/model_runner.py b/vllm/worker/model_runner.py index 1f654a9cce465..6ff98a8f1bab2 100644 --- a/vllm/worker/model_runner.py +++ b/vllm/worker/model_runner.py @@ -18,10 +18,9 @@ from vllm.attention import AttentionMetadata, get_attn_backend from vllm.attention.backends.abstract import AttentionState from vllm.attention.backends.utils import CommonAttentionState -from vllm.compilation.compile_context import set_compile_context from vllm.config import CompilationLevel, VllmConfig from vllm.core.scheduler import SchedulerOutputs -from vllm.distributed import get_pp_group +from vllm.distributed import get_kv_transfer_group, get_pp_group from vllm.distributed.parallel_state import graph_capture from vllm.forward_context import set_forward_context from vllm.inputs import INPUT_REGISTRY, InputRegistry @@ -63,16 +62,7 @@ logger = init_logger(__name__) LORA_WARMUP_RANK = 8 -_BATCH_SIZE_ALIGNMENT = 8 -# all the token sizes that **can** be captured by cudagraph. -# they can be arbitrarily large. -# currently it includes: 1, 2, 4, 8, 16, 24, 32, 40, ..., 8192. -# the actual sizes to capture will be determined by the model, -# depending on the model's max_num_seqs. -# NOTE: _get_graph_batch_size needs to be updated if this list is changed. -_BATCH_SIZES_TO_CAPTURE = [1, 2, 4] + [ - _BATCH_SIZE_ALIGNMENT * i for i in range(1, 1025) -] + _NUM_WARMUP_ITERS = 2 TModelInputForGPU = TypeVar('TModelInputForGPU', bound="ModelInputForGPU") @@ -632,11 +622,13 @@ def _compute_lora_input(self, inter_data: InterDataForSeqGroup, inter_data.lora_requests.add(seq_group_metadata.lora_request) query_len = inter_data.query_lens[seq_idx] inter_data.lora_index_mapping.append([lora_id] * query_len) - inter_data.lora_prompt_mapping.append( - [lora_id] * - (query_len if seq_group_metadata.sampling_params - and seq_group_metadata.sampling_params.prompt_logprobs is not None - else 1)) + sampling_params = seq_group_metadata.sampling_params + if sampling_params and sampling_params.prompt_logprobs is not None: + inter_data.lora_prompt_mapping.append([lora_id] * query_len) + elif not self.chunked_prefill_enabled or seq_group_metadata.do_sample: + inter_data.lora_prompt_mapping.append([lora_id]) + else: + inter_data.lora_prompt_mapping.append([]) def _compute_prompt_adapter_input( self, inter_data: InterDataForSeqGroup, @@ -763,7 +755,6 @@ def _use_captured_graph(self, max_decode_seq_len: int, max_encoder_seq_len: int = 0) -> bool: return (decode_only and not self.runner.model_config.enforce_eager - and batch_size <= _BATCH_SIZES_TO_CAPTURE[-1] and max_decode_seq_len <= self.runner.max_seq_len_to_capture and max_encoder_seq_len <= self.runner.max_seq_len_to_capture and batch_size <= self.runner.max_batchsize_to_capture) @@ -811,7 +802,8 @@ def _get_cuda_graph_pad_size(self, max_encoder_seq_len): return -1 - graph_batch_size = _get_graph_batch_size(batch_size) + graph_batch_size = self.runner.vllm_config.pad_for_cudagraph( + batch_size) assert graph_batch_size >= batch_size return graph_batch_size - batch_size @@ -1023,8 +1015,8 @@ def __init__( self.sliding_window = model_config.get_sliding_window() self.block_size = cache_config.block_size self.max_seq_len_to_capture = self.model_config.max_seq_len_to_capture - self.max_batchsize_to_capture = _get_max_graph_batch_size( - self.scheduler_config.max_num_seqs) + self.max_batchsize_to_capture = \ + 
self.vllm_config.compilation_config.max_capture_size self.graph_runners: List[Dict[int, CUDAGraphRunner]] = [ {} for _ in range(self.parallel_config.pipeline_parallel_size) @@ -1171,7 +1163,8 @@ def load_model(self) -> None: if self.vllm_config.compilation_config.level ==\ CompilationLevel.DYNAMO_AS_IS and supports_dynamo(): - backend = self.vllm_config.compilation_config.init_backend() + backend = self.vllm_config.compilation_config.init_backend( + self.vllm_config) self.model = torch.compile( self.model, fullgraph=envs.VLLM_TEST_DYNAMO_FULLGRAPH_CAPTURE, @@ -1333,14 +1326,7 @@ def profile_run(self) -> None: dtype=self.model_config.dtype, device=self.device) - graph_batch_size = self.max_batchsize_to_capture - batch_size_capture_list = [ - bs for bs in _BATCH_SIZES_TO_CAPTURE if bs <= graph_batch_size - ] - if self.model_config.enforce_eager: - batch_size_capture_list = [] - with set_compile_context(batch_size_capture_list): - self.execute_model(model_input, kv_caches, intermediate_tensors) + self.execute_model(model_input, kv_caches, intermediate_tensors) torch.cuda.synchronize() return @@ -1459,18 +1445,14 @@ def capture_model(self, kv_caches: List[List[torch.Tensor]]) -> None: dtype=self.model_config.dtype, device=self.device) - graph_batch_size = self.max_batchsize_to_capture - batch_size_capture_list = [ - bs for bs in _BATCH_SIZES_TO_CAPTURE if bs <= graph_batch_size - ] - with self.attn_state.graph_capture( max_batch_size), graph_capture() as graph_capture_context: # NOTE: Capturing the largest batch size first may help reduce the # memory usage of CUDA graph. for virtual_engine in range( self.parallel_config.pipeline_parallel_size): - for batch_size in reversed(batch_size_capture_list): + for batch_size in \ + self.vllm_config.compilation_config.capture_sizes: attn_metadata = ( self.attn_state.graph_capture_get_metadata_for_batch( batch_size, @@ -1666,6 +1648,24 @@ def execute_model( else: model_executable = self.model + # Receive KV cache in distributed KV cache transfer setting + # In disagg prefill setting, it will also recv hidden states and bypass + # model forwarding + # In KV cache database setting, it will change the model input so that + # we can skip prefilling on tokens that successfully received KV caches + # NOTE: The receive operation is blocking + bypass_model_exec = False + if self.need_recv_kv(model_input, kv_caches): + hidden_or_intermediate_states, bypass_model_exec, model_input = \ + get_kv_transfer_group().recv_kv_caches_and_hidden_states( + # model is used to know which layer the current worker + # is working on, so that we can receive KV for only those + # layers. 
+ model_executable, + model_input, + kv_caches=kv_caches + ) + multi_modal_kwargs = model_input.multi_modal_kwargs or {} seqlen_agnostic_kwargs = { "finished_requests_ids": model_input.finished_requests_ids, @@ -1677,21 +1677,36 @@ def execute_model( model_forward_end = torch.cuda.Event(enable_timing=True) model_forward_start.record() - with set_forward_context(model_input.attn_metadata, self.vllm_config): - hidden_or_intermediate_states = model_executable( - input_ids=model_input.input_tokens, - positions=model_input.input_positions, - kv_caches=kv_caches, - attn_metadata=model_input.attn_metadata, - intermediate_tensors=intermediate_tensors, - **MultiModalKwargs.as_kwargs(multi_modal_kwargs, - device=self.device), - **seqlen_agnostic_kwargs) + if not bypass_model_exec: + with set_forward_context(model_input.attn_metadata, + self.vllm_config): + hidden_or_intermediate_states = model_executable( + input_ids=model_input.input_tokens, + positions=model_input.input_positions, + kv_caches=kv_caches, + attn_metadata=model_input.attn_metadata, + intermediate_tensors=intermediate_tensors, + **MultiModalKwargs.as_kwargs(multi_modal_kwargs, + device=self.device), + **seqlen_agnostic_kwargs) if (self.observability_config is not None and self.observability_config.collect_model_forward_time): model_forward_end.record() + # Sending KV cache in distributed KV cache transfer setting + # NOTE: the send operation is non-blocking + if self.need_send_kv(model_input, kv_caches): + get_kv_transfer_group().send_kv_caches_and_hidden_states( + # model_executable is used to know which layer the current + # worker is working on, so that we can send KV for only those + # layers. + model_executable, + model_input, + kv_caches, + hidden_or_intermediate_states, + ) + # Compute the logits in the last pipeline stage. if not get_pp_group().is_last_rank: if (self.is_driver_worker @@ -1759,6 +1774,56 @@ def execute_model( return [output] + def need_recv_kv(self, model_input, kv_caches) -> bool: + """Check if we need to receive kv-cache from the other worker. + We need to receive KV when + 1. current vLLM instance is KV cache consumer/decode vLLM instance + 2. this batch is not a profiling run + 3. this batch is a prefill run + + Args: + model_input: input to the model executable + kv_caches: vLLM's paged memory + """ + + if self.vllm_config.kv_transfer_config is None: + return False + + prefill_meta = model_input.attn_metadata.prefill_metadata + + # check if the current run is profiling + is_profile_run = (kv_caches[0].numel() == 0) + # check if the current run is prefill + is_prefill_run = prefill_meta is not None + + return self.vllm_config.kv_transfer_config.is_kv_consumer and ( + not is_profile_run) and is_prefill_run + + def need_send_kv(self, model_input, kv_caches) -> bool: + """Check if we need to send kv-cache to the other worker. + We need to send KV when + 1. current vLLM instance is KV cache producer/prefill vLLM instance + 2. this batch is not a profiling run + 3. 
this batch is a prefill run + + Args: + model_input: input to the model executable + kv_caches: vLLM's paged memory + """ + + if self.vllm_config.kv_transfer_config is None: + return False + + prefill_meta = model_input.attn_metadata.prefill_metadata + + # check if the current run is profiling + is_profile_run = (kv_caches[0].numel() == 0) + # check if the current run is prefill + is_prefill_run = prefill_meta is not None + + return self.vllm_config.kv_transfer_config.is_kv_producer and ( + not is_profile_run) and is_prefill_run + # NOTE: this is nn.Module so the profiler can properly capture/group # kernel calls made within the graph @@ -1910,37 +1975,3 @@ def forward( return self.output_buffers["hidden_states"] return self.output_buffers - - -def _get_graph_batch_size(batch_size: int) -> int: - """Returns the padded batch size given actual batch size. - - Batch sizes are 1, 2, 4, _BATCH_SIZE_ALIGNMENT, - 2*_BATCH_SIZE_ALIGNMENT, 3*_BATCH_SIZE_ALIGNMENT... - """ - if batch_size <= 2: - return batch_size - elif batch_size <= 4: - return 4 - else: - return ((batch_size + _BATCH_SIZE_ALIGNMENT - 1) // - _BATCH_SIZE_ALIGNMENT * _BATCH_SIZE_ALIGNMENT) - - -def _get_max_graph_batch_size(max_num_seqs: int) -> int: - """ - max_num_seqs: Maximum number of sequences in a batch. - _BATCH_SIZES_TO_CAPTURE: all the sizes that we want to capture. - - pad the max_num_seqs if necessary by calling _get_graph_batch_size, - which will deal with some edge cases like 1, 2, 4. - - if the padded size is in _BATCH_SIZES_TO_CAPTURE, return the padded size. - if not, it means the padded size is larger than the largest size in - _BATCH_SIZES_TO_CAPTURE, return the largest size in _BATCH_SIZES_TO_CAPTURE. - """ - padded_size = _get_graph_batch_size(max_num_seqs) - if padded_size in _BATCH_SIZES_TO_CAPTURE: - return padded_size - assert padded_size > _BATCH_SIZES_TO_CAPTURE[-1] - return _BATCH_SIZES_TO_CAPTURE[-1] diff --git a/vllm/worker/multi_step_model_runner.py b/vllm/worker/multi_step_model_runner.py index 3ee0fb4dc943e..18b03bf1bfb56 100644 --- a/vllm/worker/multi_step_model_runner.py +++ b/vllm/worker/multi_step_model_runner.py @@ -29,7 +29,9 @@ logger = init_logger(__name__) -MULTI_STEP_ATTENTION_BACKENDS = ["FLASH_ATTN", "ROCM_FLASH", "FLASHINFER"] +MULTI_STEP_ATTENTION_BACKENDS = [ + "FLASH_ATTN", "ROCM_FLASH", "FLASHINFER", "NO_ATTENTION" +] MULTI_STEP_CHUNKED_PREFILL_ATTENTION_BACKENDS = ["FLASH_ATTN"] def _get_supported_attention_backends(chunked_prefill_enabled: bool) \ @@ -643,7 +645,8 @@ def _advance_step(self, model_input: StatefulModelInput, return model_input def load_model(self) -> None: - return self._base_model_runner.load_model() + self._base_model_runner.load_model() + self.model_memory_usage = self._base_model_runner.model_memory_usage def save_sharded_state( self, @@ -817,7 +820,7 @@ def _pythonize_sampler_output( for sgdx, (seq_group, sample_result) in enumerate(zip(seq_groups, samples_list)): - # Reminder: Please update docs/source/serving/compatibility_matrix.rst + # Reminder: Please update docs/source/usage/compatibility_matrix.rst # If the feature combo becomes valid # (Check for Guided Decoding) if seq_group.sampling_params.logits_processors: diff --git a/vllm/worker/openvino_worker.py b/vllm/worker/openvino_worker.py index 205f8a337ce6c..0bf522d5333ed 100644 --- a/vllm/worker/openvino_worker.py +++ b/vllm/worker/openvino_worker.py @@ -489,7 +489,7 @@ def model_profile_run(): block_size = cache_config.block_size seq_num_blocks = (seq_len + block_size - 1) // block_size - seq_data,
dummy_multi_modal_data = input_registry \ + dummy_data = input_registry \ .dummy_data_for_profiling(model_config, seq_len, mm_registry) @@ -498,11 +498,11 @@ def model_profile_run(): seq = SequenceGroupMetadata( request_id=str(group_id), is_prompt=True, - seq_data={group_id: seq_data}, + seq_data={group_id: dummy_data.seq_data}, sampling_params=sampling_params, block_tables=block_tables, lora_request=None, - multi_modal_data=dummy_multi_modal_data) + multi_modal_data=dummy_data.multi_modal_data) seqs.append(seq) self.model_runner.block_size = tmp_cache_config.block_size diff --git a/vllm/worker/embedding_model_runner.py b/vllm/worker/pooling_model_runner.py similarity index 98% rename from vllm/worker/embedding_model_runner.py rename to vllm/worker/pooling_model_runner.py index f56805918fd15..1beae1e3884c5 100644 --- a/vllm/worker/embedding_model_runner.py +++ b/vllm/worker/pooling_model_runner.py @@ -21,12 +21,12 @@ @dataclasses.dataclass(frozen=True) class ModelInputForGPUWithPoolingMetadata(ModelInputForGPU): """ - Used by the EmbeddingModelRunner. + Used by the PoolingModelRunner. """ pooling_metadata: Optional["PoolingMetadata"] = None -class EmbeddingModelRunner( +class PoolingModelRunner( GPUModelRunnerBase[ModelInputForGPUWithPoolingMetadata]): _model_input_cls: Type[ModelInputForGPUWithPoolingMetadata] = ( ModelInputForGPUWithPoolingMetadata) @@ -52,7 +52,7 @@ def execute_model( ) -> Optional[Union[List[PoolerOutput], IntermediateTensors]]: if num_steps > 1: raise ValueError( - "EmbeddingModelRunner does not support multi-step execution.") + "PoolingModelRunner does not support multi-step execution.") if self.lora_config: assert model_input.lora_requests is not None diff --git a/vllm/worker/utils.py b/vllm/worker/utils.py index f43635464ef00..5f71ec0c14df8 100644 --- a/vllm/worker/utils.py +++ b/vllm/worker/utils.py @@ -13,7 +13,7 @@ def assert_enc_dec_mr_supported_scenario( a supported scenario. 
''' - # Reminder: Please update docs/source/serving/compatibility_matrix.rst + # Reminder: Please update docs/source/usage/compatibility_matrix.rst # If the feature combo becomes valid if enc_dec_mr.cache_config.enable_prefix_caching: diff --git a/vllm/worker/worker.py b/vllm/worker/worker.py index 24e7bc760b0c0..f51b51d433d3d 100644 --- a/vllm/worker/worker.py +++ b/vllm/worker/worker.py @@ -1,15 +1,15 @@ """A GPU worker class.""" import gc import os -import time from typing import Dict, List, Optional, Set, Tuple, Type, Union import torch import torch.distributed import vllm.envs as envs -from vllm.config import ParallelConfig, VllmConfig -from vllm.distributed import (ensure_model_parallel_initialized, +from vllm.config import VllmConfig +from vllm.distributed import (ensure_kv_transfer_initialized, + ensure_model_parallel_initialized, init_distributed_environment, set_custom_all_reduce) from vllm.logger import init_logger @@ -21,10 +21,11 @@ from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sequence import (ExecuteModelRequest, IntermediateTensors, SequenceGroupMetadata, SequenceGroupMetadataDelta) +from vllm.utils import GiB_bytes, memory_profiling from vllm.worker.cache_engine import CacheEngine -from vllm.worker.embedding_model_runner import EmbeddingModelRunner from vllm.worker.enc_dec_model_runner import EncoderDecoderModelRunner from vllm.worker.model_runner import GPUModelRunnerBase, ModelRunner +from vllm.worker.pooling_model_runner import PoolingModelRunner from vllm.worker.worker_base import (LocalOrDistributedWorkerBase, WorkerBase, WorkerInput) @@ -74,8 +75,8 @@ def __init__( else {"return_hidden_states": True} ModelRunnerClass: Type[GPUModelRunnerBase] = ModelRunner - if model_config.task == "embedding": - ModelRunnerClass = EmbeddingModelRunner + if model_config.runner_type == "pooling": + ModelRunnerClass = PoolingModelRunner elif self.model_config.is_encoder_decoder: ModelRunnerClass = EncoderDecoderModelRunner self.model_runner: GPUModelRunnerBase = ModelRunnerClass( @@ -90,7 +91,7 @@ def __init__( # Uninitialized cache engine. Will be initialized by # initialize_cache. self.cache_engine: List[CacheEngine] - # Initialize gpu_cache as embedding models don't initialize kv_caches + # Initialize gpu_cache as pooling models don't initialize kv_caches self.gpu_cache: Optional[List[List[torch.Tensor]]] = None self._seq_group_metadata_cache: Dict[str, SequenceGroupMetadata] = {} @@ -144,7 +145,7 @@ def init_device(self) -> None: raise RuntimeError( f"Unsupported device type: {self.device_config.device}") # Initialize the distributed environment. - init_worker_distributed_environment(self.parallel_config, self.rank, + init_worker_distributed_environment(self.vllm_config, self.rank, self.distributed_init_method, self.local_rank) # Set random seed. @@ -191,33 +192,22 @@ def determine_num_available_blocks(self) -> Tuple[int, int]: torch.cuda.reset_peak_memory_stats() free_memory_pre_profile, total_gpu_memory = torch.cuda.mem_get_info() - start_time = time.time() # Execute a forward pass with dummy inputs to profile the memory usage # of the model. - self.model_runner.profile_run() - torch.cuda.synchronize() + with memory_profiling(baseline_memory_in_bytes=total_gpu_memory - + self.init_gpu_memory, + weights_memory_in_bytes=self.model_runner.
+ model_memory_usage) as result: + self.model_runner.profile_run() + torch.cuda.synchronize() self._assert_memory_footprint_increased_during_profiling() - # Get the peak memory allocation recorded by torch - peak_memory = torch.cuda.memory_stats()["allocated_bytes.all.peak"] - - # Check for any memory left around that may have been allocated on the - # gpu outside of `torch`. NCCL operations, for example, can use a few - # GB during a forward pass - torch.cuda.empty_cache() - torch_allocated_bytes = torch.cuda.memory_stats( - )["allocated_bytes.all.current"] - total_allocated_bytes = torch.cuda.mem_get_info( - )[1] - torch.cuda.mem_get_info()[0] - non_torch_allocations = total_allocated_bytes - torch_allocated_bytes - if non_torch_allocations > 0: - peak_memory += non_torch_allocations - - available_kv_cache_memory = ( - total_gpu_memory * self.cache_config.gpu_memory_utilization - - peak_memory) + memory_for_current_instance = total_gpu_memory * \ + self.cache_config.gpu_memory_utilization + available_kv_cache_memory = (memory_for_current_instance - + result.non_kv_cache_memory_in_bytes) # Calculate the number of blocks that can be allocated with the # profiled peak memory. @@ -232,24 +222,23 @@ def determine_num_available_blocks(self) -> Tuple[int, int]: num_gpu_blocks = max(num_gpu_blocks, 0) num_cpu_blocks = max(num_cpu_blocks, 0) - end_time = time.time() - logger.info( - "Memory profiling results: " - "duration=%.2f seconds, " - "total_gpu_memory=%.2fGiB, " - "initial_memory_usage=%.2fGiB, " - "peak_torch_memory=%.2fGiB, " - "memory_usage_post_profile=%.2fGiB, " - "non_torch_memory=%.2fGiB, " - "kv_cache_size=%.2fGiB, " - "gpu_memory_utilization=%.2f.", end_time - start_time, - total_gpu_memory / (1024**3), - (total_gpu_memory - free_memory_pre_profile) / (1024**3), - (peak_memory - non_torch_allocations) / (1024**3), - total_allocated_bytes / (1024**3), - non_torch_allocations / (1024**3), - available_kv_cache_memory / (1024**3), - self.cache_config.gpu_memory_utilization) + msg = (f"Memory profiling takes {result.profile_time:.2f} seconds\n" + "the current vLLM instance can use " + "total_gpu_memory " + f"({(total_gpu_memory / GiB_bytes):.2f}GiB)" + " x gpu_memory_utilization " + f"({self.cache_config.gpu_memory_utilization:.2f})" + f" = {(memory_for_current_instance / GiB_bytes):.2f}GiB\n" + "model weights take " + f"{(result.weights_memory_in_bytes / GiB_bytes):.2f}GiB;" + " non_torch_memory takes " + f"{(result.non_torch_increase_in_bytes / GiB_bytes):.2f}GiB;" + " PyTorch activation peak memory takes " + f"{(result.torch_peak_increase_in_bytes / GiB_bytes):.2f}GiB;" + " the rest of the memory reserved for KV Cache is " + f"{(available_kv_cache_memory / GiB_bytes):.2f}GiB.") + + logger.info(msg) # Final cleanup if self.model_runner.lora_manager: @@ -457,20 +446,22 @@ def get_cache_block_size_bytes(self) -> int: def init_worker_distributed_environment( - parallel_config: ParallelConfig, + vllm_config: VllmConfig, rank: int, distributed_init_method: Optional[str] = None, local_rank: int = -1, ) -> None: """Initialize the distributed environment.""" + parallel_config = vllm_config.parallel_config set_custom_all_reduce(not parallel_config.disable_custom_all_reduce) init_distributed_environment(parallel_config.world_size, rank, distributed_init_method, local_rank) - ensure_model_parallel_initialized(parallel_config.tensor_parallel_size, parallel_config.pipeline_parallel_size) + ensure_kv_transfer_initialized(vllm_config) + def _check_if_gpu_supports_dtype(torch_dtype: torch.dtype): # 
Check if the GPU supports the dtype. diff --git a/vllm/worker/worker_base.py b/vllm/worker/worker_base.py index 7aaa8b453cff1..6d00102e0a324 100644 --- a/vllm/worker/worker_base.py +++ b/vllm/worker/worker_base.py @@ -43,6 +43,7 @@ def __init__( self.speculative_config = vllm_config.speculative_config self.prompt_adapter_config = vllm_config.prompt_adapter_config self.observability_config = vllm_config.observability_config + self.kv_transfer_config = vllm_config.kv_transfer_config @abstractmethod def init_device(self) -> None: @@ -438,7 +439,7 @@ def init_worker(self, *args, **kwargs): Here we inject some common logic before initializing the worker. Arguments are passed to the worker class constructor. """ - enable_trace_function_call_for_thread() + enable_trace_function_call_for_thread(self.vllm_config) # see https://github.com/NVIDIA/nccl/issues/1234 os.environ['NCCL_CUMEM_ENABLE'] = '0' diff --git a/vllm/worker/xpu_model_runner.py b/vllm/worker/xpu_model_runner.py index e6322e095bbb9..9cf25387560da 100644 --- a/vllm/worker/xpu_model_runner.py +++ b/vllm/worker/xpu_model_runner.py @@ -37,10 +37,6 @@ logger = init_logger(__name__) _PAD_SLOT_ID = -1 -_BATCH_SIZE_ALIGNMENT = 8 -_BATCH_SIZES_TO_CAPTURE = [1, 2, 4] + [ - _BATCH_SIZE_ALIGNMENT * i for i in range(1, 33) -] TModelInputForXPU = TypeVar('TModelInputForXPU', bound="ModelInputForXPU") From 6d8f1fd5c076bca22a45c3f27c177b3c911da824 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Thu, 19 Dec 2024 11:01:23 +0100 Subject: [PATCH 02/86] revert some changes --- vllm/model_executor/layers/linear.py | 24 ++---------------------- 1 file changed, 2 insertions(+), 22 deletions(-) diff --git a/vllm/model_executor/layers/linear.py b/vllm/model_executor/layers/linear.py index 678a1171e745d..46ef11e7d02c6 100644 --- a/vllm/model_executor/layers/linear.py +++ b/vllm/model_executor/layers/linear.py @@ -23,16 +23,6 @@ logger = init_logger(__name__) - -from torch.distributed.tensor import ( - DeviceMesh, - distribute_module, - distribute_tensor, - DTensor, - Replicate, - Shard, -) - WEIGHT_LOADER_V2_SUPPORTED = [ "CompressedTensorsLinearMethod", "AWQMarlinLinearMethod", "AWQLinearMethod", "GPTQMarlinLinearMethod", "Fp8LinearMethod", @@ -284,9 +274,8 @@ class ColumnParallelLinear(LinearBase): """ def __init__(self, - *, - input_size: int = None, - output_size: int=None, + input_size: int, + output_size: int, bias: bool = True, gather_output: bool = False, skip_bias_add: bool = False, @@ -294,10 +283,6 @@ def __init__(self, quant_config: Optional[QuantizationConfig] = None, output_sizes: Optional[List[int]] = None, prefix: str = ""): - - self.input_size = (input_size or Replicate(),) - self.output_size = (output_size or Shard(-1),) - super().__init__(input_size, output_size, skip_bias_add, params_dtype, quant_config, prefix) @@ -1002,7 +987,6 @@ class RowParallelLinear(LinearBase): """ def __init__(self, - *, input_size: int, output_size: int, bias: bool = True, @@ -1012,10 +996,6 @@ def __init__(self, reduce_results: bool = True, quant_config: Optional[QuantizationConfig] = None, prefix: str = ""): - - self.input_size = (input_size or Replicate(),) - self.output_size = (output_size or Shard(-1),) - super().__init__(input_size, output_size, skip_bias_add, params_dtype, quant_config, prefix) From fb37617e35f3b044a55d0a8c50d4013854bf0ef6 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Thu, 19 Dec 2024 11:02:05 +0100 Subject: [PATCH 03/86] changes are now merged with main of transformers --- requirements-common.txt | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/requirements-common.txt b/requirements-common.txt index a72f5546b0fc4..642926d3c1d38 100644 --- a/requirements-common.txt +++ b/requirements-common.txt @@ -6,7 +6,7 @@ tqdm blake3 py-cpuinfo # transformers >= 4.45.2 # Required for Llama 3.2 and Qwen2-VL. -git+https://github.com/huggingface/transformers@llama-refactor +git+https://github.com/huggingface/transformers@main tokenizers >= 0.19.1 # Required for Llama 3. protobuf # Required by LlamaTokenizer. fastapi >= 0.107.0, < 0.113.0; python_version < '3.9' From 2d0c128b56829729a3f81b380e4deb0c4aea6bd8 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Thu, 19 Dec 2024 11:02:25 +0100 Subject: [PATCH 04/86] revert more changes --- vllm/attention/backends/torch_sdpa.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/vllm/attention/backends/torch_sdpa.py b/vllm/attention/backends/torch_sdpa.py index 245ef7eb5dd87..0cff6f5952aba 100644 --- a/vllm/attention/backends/torch_sdpa.py +++ b/vllm/attention/backends/torch_sdpa.py @@ -508,11 +508,11 @@ def forward( num_prefill_tokens = attn_metadata.num_encoder_tokens num_decode_tokens = 0 - # if attn_type == AttentionType.DECODER: - # # Only enforce this shape-constraint for decoder - # # self-attention - # assert key.shape[0] == num_prefill_tokens + num_decode_tokens - # assert value.shape[0] == num_prefill_tokens + num_decode_tokens + if attn_type == AttentionType.DECODER: + # Only enforce this shape-constraint for decoder + # self-attention + assert key.shape[0] == num_prefill_tokens + num_decode_tokens + assert value.shape[0] == num_prefill_tokens + num_decode_tokens output = torch.empty_like(query) if prefill_meta := attn_metadata.prefill_metadata: From a49aa815e2b821549acc910745627d6808ce2be5 Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Thu, 9 Jan 2025 12:17:40 +0000 Subject: [PATCH 05/86] Undo whitespace changes Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- tests/entrypoints/llm/test_guided_generate.py | 1 - vllm/model_executor/models/phi3v.py | 1 - vllm/multimodal/processing.py | 1 + 3 files changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/entrypoints/llm/test_guided_generate.py b/tests/entrypoints/llm/test_guided_generate.py index fce1c7b581367..ccb9906fc5c0f 100644 --- a/tests/entrypoints/llm/test_guided_generate.py +++ b/tests/entrypoints/llm/test_guided_generate.py @@ -1,4 +1,3 @@ - import json import re import weakref diff --git a/vllm/model_executor/models/phi3v.py b/vllm/model_executor/models/phi3v.py index d0311c8f512e7..a1b1af35604db 100644 --- a/vllm/model_executor/models/phi3v.py +++ b/vllm/model_executor/models/phi3v.py @@ -1,4 +1,3 @@ - # Copyright 2024 The vLLM team. # Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved. 
# diff --git a/vllm/multimodal/processing.py b/vllm/multimodal/processing.py index 05536fba35cb4..31610552d904c 100644 --- a/vllm/multimodal/processing.py +++ b/vllm/multimodal/processing.py @@ -221,6 +221,7 @@ def iter_token_matches( else: start_idx += 1 + @dataclass(repr=False) class _PromptReplacementMatch(ABC): prompt_repl: BoundPromptReplacement From ff19ade185dea1555ef9713f1cff4ff1ea0aba31 Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Mon, 13 Jan 2025 16:49:55 +0100 Subject: [PATCH 06/86] Update transformers pin Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- requirements-common.txt | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/requirements-common.txt b/requirements-common.txt index 861f4ec3322ac..af9e46305cbac 100644 --- a/requirements-common.txt +++ b/requirements-common.txt @@ -5,8 +5,7 @@ requests >= 2.26.0 tqdm blake3 py-cpuinfo -# transformers >= 4.45.2 # Required for Llama 3.2 and Qwen2-VL. -git+https://github.com/huggingface/transformers@main +transformers >= 4.48.0 # Required for Transformers model. tokenizers >= 0.19.1 # Required for Llama 3. protobuf # Required by LlamaTokenizer. fastapi >= 0.107.0, < 0.113.0; python_version < '3.9' @@ -31,7 +30,6 @@ msgspec gguf == 0.10.0 importlib_metadata mistral_common[opencv] >= 1.5.0 -transformers>=4.47 pyyaml six>=1.16.0; python_version > '3.11' # transitive dependency of pandas that needs to be the latest version for python 3.12 setuptools>=74.1.1; python_version > '3.11' # Setuptools is used by triton, we need to ensure a modern version is installed for 3.12+ so that it does not try to import distutils, which was removed in 3.12 From 038604bcc006b940d7181b8444f2354eb2def630 Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Mon, 13 Jan 2025 18:17:15 +0100 Subject: [PATCH 07/86] Remove unreachable code Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- vllm/multimodal/processing.py | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/vllm/multimodal/processing.py b/vllm/multimodal/processing.py index 7e6cd2f956315..8b47dfb07387f 100644 --- a/vllm/multimodal/processing.py +++ b/vllm/multimodal/processing.py @@ -752,24 +752,6 @@ def _apply_hf_processor_text_mm( ) return prompt_ids, mm_kwargs - - try: - hf_inputs = hf_processor( - text=prompt, # type: ignore - **processor_data, - **mm_processor_kwargs, - return_tensors="pt", - ) - except Exception as exc: - data = dict(text=prompt, **processor_data) - - raise RuntimeError( - f"Failed to apply {type(hf_processor).__name__} " - f"on data={data} with kwargs={mm_processor_kwargs}") from exc - - hf_inputs.update(passthrough_data) - - return hf_inputs def _apply_hf_processor_text_only(self, prompt_text: str) -> list[int]: """ From 882ef81921b67658313ca2adfb5583bbe808d8c1 Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Mon, 13 Jan 2025 18:46:46 +0100 Subject: [PATCH 08/86] Remove dead code Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- vllm/model_executor/model_loader/utils.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py index 010a1fc486140..73f1938f5360f 100644 --- a/vllm/model_executor/model_loader/utils.py +++ b/vllm/model_executor/model_loader/utils.py @@ -39,10 +39,6 @@ def get_model_architecture( # FIXME(Isotr0py): This is a temporary 
hack to enable transformers fallback. architectures = ["TransformersModel"] - model_cls, arch = ModelRegistry.resolve_model_cls(architectures) - if model_config.runner_type == "pooling": - model_cls = as_embedding_model(model_cls) - model_cls, arch = ModelRegistry.resolve_model_cls(architectures) if model_config.task == "embed": model_cls = as_embedding_model(model_cls) From f254f2c612d15ebd4099d5af74cd60d6f95f35a2 Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Mon, 13 Jan 2025 19:43:44 +0100 Subject: [PATCH 09/86] Update to latest attention interface Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- vllm/model_executor/models/transformers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index b0324f0e871cc..78b56c6f203ae 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -65,7 +65,7 @@ def vllm_flash_attention_forward( hidden = query.shape[-2] query, key, value = [x.transpose(1,2) for x in (query, key, value)] query, key, value = [x.reshape(hidden,-1) for x in (query, key, value)] - return attention_interface(query, key, value, kv_cache=kv_caches[layer_idx],attn_metadata=attn_metadata), None + return attention_interface(query, key, value, _kv_cache=kv_caches[layer_idx],_attn_metadata=attn_metadata), None ALL_ATTENTION_FUNCTIONS["vllm"] = vllm_flash_attention_forward From 5a1a8335621de414c7a27a638b0441fd2b45feca Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Mon, 13 Jan 2025 19:46:05 +0100 Subject: [PATCH 10/86] Always try to load `TransformersModel` if model isn't explicitly supported Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- vllm/model_executor/model_loader/utils.py | 3 --- vllm/model_executor/models/registry.py | 4 ++-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py index 73f1938f5360f..44978a55e072d 100644 --- a/vllm/model_executor/model_loader/utils.py +++ b/vllm/model_executor/model_loader/utils.py @@ -35,9 +35,6 @@ def get_model_architecture( and model_config.quantization not in mixtral_supported and "MixtralForCausalLM" in architectures): architectures = ["QuantMixtralForCausalLM"] - - # FIXME(Isotr0py): This is a temporary hack to enable transformers fallback. 
- architectures = ["TransformersModel"] model_cls, arch = ModelRegistry.resolve_model_cls(architectures) if model_config.task == "embed": diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index ed002842d4b53..b91fa4e4a0217 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -355,13 +355,13 @@ def _raise_for_unsupported(self, architectures: List[str]): def _try_load_model_cls(self, model_arch: str) -> Optional[Type[nn.Module]]: if model_arch not in self.models: - return None + return _try_load_model_cls(model_arch, self.models["TransformersModel"]) return _try_load_model_cls(model_arch, self.models[model_arch]) def _try_inspect_model_cls(self, model_arch: str) -> Optional[_ModelInfo]: if model_arch not in self.models: - return None + return _try_inspect_model_cls(model_arch, self.models["TransformersModel"]) return _try_inspect_model_cls(model_arch, self.models[model_arch]) From b7de34df4b675c9753c5e97d21d2e78d5ef998c1 Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Mon, 13 Jan 2025 19:46:33 +0100 Subject: [PATCH 11/86] Temporarily remove Llama from registry Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- vllm/model_executor/models/registry.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index b91fa4e4a0217..74faea095d65b 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -62,7 +62,7 @@ "InternLM2VEForCausalLM": ("internlm2_ve", "InternLM2VEForCausalLM"), "JAISLMHeadModel": ("jais", "JAISLMHeadModel"), "JambaForCausalLM": ("jamba", "JambaForCausalLM"), - "LlamaForCausalLM": ("llama", "LlamaForCausalLM"), + # "LlamaForCausalLM": ("llama", "LlamaForCausalLM"), # For decapoda-research/llama-* "LLaMAForCausalLM": ("llama", "LlamaForCausalLM"), "MambaForCausalLM": ("mamba", "MambaForCausalLM"), From 49c4616204e62a202582f6f00c6f0f26840dca3c Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Thu, 16 Jan 2025 18:46:32 +0100 Subject: [PATCH 12/86] Deduplicate registry code slightly Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- vllm/model_executor/models/registry.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index 74faea095d65b..a7029c817d46b 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -179,6 +179,10 @@ "MedusaModel": ("medusa", "Medusa"), "MLPSpeculatorPreTrainedModel": ("mlp_speculator", "MLPSpeculator"), } + +_FALLBACK_MODEL = { + "TransformersModel": ("transformers", "TransformersModel"), +} # yapf: enable _VLLM_MODELS = { @@ -187,7 +191,7 @@ **_CROSS_ENCODER_MODELS, **_MULTIMODAL_MODELS, **_SPECULATIVE_DECODING_MODELS, - "TransformersModel": ("transformers", "TransformersModel"), + **_FALLBACK_MODEL, } @@ -354,16 +358,18 @@ def _raise_for_unsupported(self, architectures: List[str]): def _try_load_model_cls(self, model_arch: str) -> Optional[Type[nn.Module]]: - if model_arch not in self.models: - return _try_load_model_cls(model_arch, self.models["TransformersModel"]) + model = self.models.get(model_arch) + if model is None: + model = self.models[next(iter(_FALLBACK_MODEL))] - return _try_load_model_cls(model_arch, 
self.models[model_arch]) + return _try_load_model_cls(model_arch, model) def _try_inspect_model_cls(self, model_arch: str) -> Optional[_ModelInfo]: - if model_arch not in self.models: - return _try_inspect_model_cls(model_arch, self.models["TransformersModel"]) + model = self.models.get(model_arch) + if model is None: + model = self.models[next(iter(_FALLBACK_MODEL))] - return _try_inspect_model_cls(model_arch, self.models[model_arch]) + return _try_inspect_model_cls(model_arch, model) def _normalize_archs( self, From 071246df9f744f43677040ed6ab3a0501f83cc4a Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Thu, 16 Jan 2025 18:46:49 +0100 Subject: [PATCH 13/86] Fix profiling of Attentions Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- vllm/model_executor/models/transformers.py | 35 +++++++++++++--------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index 78b56c6f203ae..b4340bc1af41c 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -58,14 +58,14 @@ def vllm_flash_attention_forward( query_length: int=None, kv_caches: torch.Tensor=None, attn_metadata: AttentionMetadata=None, - attention_interface=None, + attention_instances=None, **kwargs ): layer_idx = _module.layer_idx hidden = query.shape[-2] query, key, value = [x.transpose(1,2) for x in (query, key, value)] query, key, value = [x.reshape(hidden,-1) for x in (query, key, value)] - return attention_interface(query, key, value, _kv_cache=kv_caches[layer_idx],_attn_metadata=attn_metadata), None + return attention_instances[layer_idx].forward(query, key, value, _kv_cache=kv_caches[layer_idx],_attn_metadata=attn_metadata), None ALL_ATTENTION_FUNCTIONS["vllm"] = vllm_flash_attention_forward @@ -116,23 +116,30 @@ def __init__( super().__init__() config = vllm_config.model_config.hf_config + cache_config = vllm_config.cache_config + quant_config = vllm_config.quant_config self.config = config self.vocab_size = config.vocab_size self.unpadded_vocab_size = config.vocab_size - self.tp_size = get_tensor_model_parallel_world_size() - self.attention_interface = Attention( - divide(config.num_attention_heads, self.tp_size), - config.head_dim, - config.head_dim**-0.5, # ish, the sacling is different for every attn layer - num_kv_heads=divide(config.num_key_value_heads, self.tp_size), - cache_config=vllm_config.cache_config, - quant_config=vllm_config.quant_config, - ) - config._attn_implementation_internal="vllm" + tp_size = get_tensor_model_parallel_world_size() + # Assumes 1 attention operation per hidden layer + self.attention_instances = [ + Attention( + divide(config.num_attention_heads, tp_size), + config.head_dim, + config.head_dim**-0.5, # ish, the sacling is different for every attn layer + num_kv_heads=divide(config.num_key_value_heads, tp_size), + cache_config=cache_config, + quant_config=quant_config, + prefix=maybe_prefix(prefix, f"{i}.attn") + ) for i in range(config.num_hidden_layers) + + ] + self.config._attn_implementation_internal="vllm" self.tp_plan = self.config.base_model_tp_plan - self.model = AutoModel.from_config(config) + self.model = AutoModel.from_config(self.config) self.tensor_parallelize(self.model) # TODO(Isotr0py): Find a better method to parallelize VocabEmbedding @@ -192,7 +199,7 @@ def forward( position_ids=positions[None,...], kv_caches=kv_caches, attn_metadata=attn_metadata, 
intermediate_tensors=intermediate_tensors, - attention_interface = self.attention_interface.forward, + attention_instances = self.attention_instances, return_dict=False )[0][0,...] # we remove batch dimension for now return model_output From 61905916d2662bdf1d7afc7c207161dee2eac6a8 Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Thu, 16 Jan 2025 18:47:40 +0100 Subject: [PATCH 14/86] Run `./format.sh` on `transformers.py` Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- vllm/model_executor/models/transformers.py | 90 +++++++++++----------- 1 file changed, 45 insertions(+), 45 deletions(-) diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index b4340bc1af41c..653d21f3aae83 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - """Wrapper around `transformers` models""" import re from typing import Dict, Iterable, List, Optional, Set, Tuple, TypedDict, Union @@ -34,7 +33,6 @@ from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.sequence import IntermediateTensors - from .utils import maybe_prefix @@ -49,23 +47,26 @@ class VllmKwargsForCausalLM(TypedDict, total=False): attn_metadata: AttentionMetadata -def vllm_flash_attention_forward( - _module, - query: torch.Tensor, - key: torch.Tensor, - value: torch.Tensor, - attention_mask: torch.Tensor, - query_length: int=None, - kv_caches: torch.Tensor=None, - attn_metadata: AttentionMetadata=None, - attention_instances=None, - **kwargs - ): +def vllm_flash_attention_forward(_module, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + attention_mask: torch.Tensor, + query_length: int = None, + kv_caches: torch.Tensor = None, + attn_metadata: AttentionMetadata = None, + attention_instances=None, + **kwargs): layer_idx = _module.layer_idx hidden = query.shape[-2] - query, key, value = [x.transpose(1,2) for x in (query, key, value)] - query, key, value = [x.reshape(hidden,-1) for x in (query, key, value)] - return attention_instances[layer_idx].forward(query, key, value, _kv_cache=kv_caches[layer_idx],_attn_metadata=attn_metadata), None + query, key, value = [x.transpose(1, 2) for x in (query, key, value)] + query, key, value = [x.reshape(hidden, -1) for x in (query, key, value)] + return attention_instances[layer_idx].forward( + query, + key, + value, + _kv_cache=kv_caches[layer_idx], + _attn_metadata=attn_metadata), None ALL_ATTENTION_FUNCTIONS["vllm"] = vllm_flash_attention_forward @@ -74,10 +75,13 @@ def vllm_flash_attention_forward( # Linear Layer that is compatiable with transformers internal forward # TODO: This is a temporary solution, we should find a better way to intergrate class HFColumnParallelLinear(ColumnParallelLinear): + def forward(self, input: torch.Tensor) -> torch.Tensor: return super().forward(input)[0] + class HFRowParallelLinear(RowParallelLinear): + def forward(self, input: torch.Tensor) -> torch.Tensor: return super().forward(input)[0] @@ -89,7 +93,8 @@ def replace_tp_linear_class(orig_module: nn.Linear, style: str): """ if not isinstance(style, str): - raise ValueError(f"Unsupported parallel style type {type(style)}, expected str") + raise ValueError( + f"Unsupported parallel style type {type(style)}, expected str") 
input_size = orig_module.in_features output_size = orig_module.out_features @@ -107,12 +112,7 @@ def replace_tp_linear_class(orig_module: nn.Linear, style: str): class TransformersModel(nn.Module): embedding_padding_modules = ["lm_head"] - def __init__( - self, - *, - vllm_config: VllmConfig, - prefix: str = "" - ) -> None: + def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: super().__init__() config = vllm_config.model_config.hf_config @@ -128,15 +128,15 @@ def __init__( Attention( divide(config.num_attention_heads, tp_size), config.head_dim, - config.head_dim**-0.5, # ish, the sacling is different for every attn layer + config.head_dim** + -0.5, # ish, the sacling is different for every attn layer num_kv_heads=divide(config.num_key_value_heads, tp_size), cache_config=cache_config, quant_config=quant_config, - prefix=maybe_prefix(prefix, f"{i}.attn") - ) for i in range(config.num_hidden_layers) - + prefix=maybe_prefix(prefix, f"{i}.attn")) + for i in range(config.num_hidden_layers) ] - self.config._attn_implementation_internal="vllm" + self.config._attn_implementation_internal = "vllm" self.tp_plan = self.config.base_model_tp_plan self.model = AutoModel.from_config(self.config) @@ -152,31 +152,30 @@ def __init__( self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size, quant_config=None, - prefix=maybe_prefix( - prefix, "lm_head")) + prefix=maybe_prefix(prefix, "lm_head")) if config.tie_word_embeddings: self.lm_head.weight = self.model.get_input_embeddings().weight logit_scale = getattr(config, "logit_scale", 1.0) self.logits_processor = LogitsProcessor(self.unpadded_vocab_size, - config.vocab_size, - logit_scale) + config.vocab_size, logit_scale) self.sampler = get_sampler() - - def tensor_parallelize(self, module: nn.Module, prefix: str =""): + def tensor_parallelize(self, module: nn.Module, prefix: str = ""): for child_name, child_module in module.named_children(): qual_name = prefix + child_name for pattern, style in self.tp_plan.items(): - if re.match(pattern, qual_name) and isinstance(child_module, nn.Linear): + if re.match(pattern, qual_name) and isinstance( + child_module, nn.Linear): new_module = replace_tp_linear_class(child_module, style) print(f"{qual_name}: {child_module} -> {new_module}") setattr(module, child_name, new_module) else: self.tensor_parallelize(child_module, prefix=f"{qual_name}.") - - def _autoset_attn_implementation(self, config, + def _autoset_attn_implementation( + self, + config, use_flash_attention_2: bool = False, torch_dtype: Optional[torch.dtype] = None, device_map: Optional[Union[str, Dict[str, int]]] = None, @@ -195,13 +194,14 @@ def forward( intermediate_tensors: Optional[IntermediateTensors] = None, ) -> Union[torch.Tensor, IntermediateTensors]: model_output = self.model( - input_ids[None,...], use_cache=False, - position_ids=positions[None,...], - kv_caches=kv_caches, attn_metadata=attn_metadata, + input_ids[None, ...], + use_cache=False, + position_ids=positions[None, ...], + kv_caches=kv_caches, + attn_metadata=attn_metadata, intermediate_tensors=intermediate_tensors, - attention_instances = self.attention_instances, - return_dict=False - )[0][0,...] # we remove batch dimension for now + attention_instances=self.attention_instances, + return_dict=False)[0][0, ...] 
# we remove batch dimension for now return model_output def compute_logits( @@ -218,7 +218,7 @@ def sample(self, logits: torch.Tensor, next_tokens = self.sampler(logits, sampling_metadata) return next_tokens - + def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]) -> Set[str]: params_dict = dict(self.named_parameters()) From 7ae8262c9a495b4f2b4ae037e60ba2655517940b Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Thu, 16 Jan 2025 18:51:59 +0100 Subject: [PATCH 15/86] Fix spelling Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- vllm/model_executor/models/transformers.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index 653d21f3aae83..d4ec24c437223 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -72,8 +72,8 @@ def vllm_flash_attention_forward(_module, ALL_ATTENTION_FUNCTIONS["vllm"] = vllm_flash_attention_forward -# Linear Layer that is compatiable with transformers internal forward -# TODO: This is a temporary solution, we should find a better way to intergrate +# Linear Layer that is compatible with transformers internal forward +# TODO: This is a temporary solution, we should find a better way to integrate class HFColumnParallelLinear(ColumnParallelLinear): def forward(self, input: torch.Tensor) -> torch.Tensor: @@ -129,7 +129,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: divide(config.num_attention_heads, tp_size), config.head_dim, config.head_dim** - -0.5, # ish, the sacling is different for every attn layer + -0.5, # ish, the scaling is different for every attn layer num_kv_heads=divide(config.num_key_value_heads, tp_size), cache_config=cache_config, quant_config=quant_config, From 988586d5c17b66a2de2341379c7a56e52ea478c9 Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Thu, 16 Jan 2025 19:08:57 +0100 Subject: [PATCH 16/86] Undo changes to `chat.py` Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- examples/offline_inference/chat.py | 72 +++++++++++++----------------- 1 file changed, 31 insertions(+), 41 deletions(-) diff --git a/examples/offline_inference/chat.py b/examples/offline_inference/chat.py index de565eb47819f..8814f4d7bef0d 100644 --- a/examples/offline_inference/chat.py +++ b/examples/offline_inference/chat.py @@ -1,7 +1,7 @@ from vllm import LLM, SamplingParams -# llm = LLM(model="meta-llama/Llama-3.2-1B-Instruct") -sampling_params = SamplingParams(temperature=0.5, top_k=8, max_tokens=300) +llm = LLM(model="meta-llama/Meta-Llama-3-8B-Instruct") +sampling_params = SamplingParams(temperature=0.5) def print_outputs(outputs): @@ -14,14 +14,9 @@ def print_outputs(outputs): print("=" * 80) -# outputs = llm.generate(["The theory of relativity states that"], -# sampling_params=sampling_params, -# use_tqdm=False) -# print_outputs(outputs) +# In this script, we demonstrate how to pass input to the chat method: -# # In this script, we demonstrate how to pass input to the chat method: -llm = LLM(model="meta-llama/Llama-3.1-8B-Instruct", tensor_parallel_size=1) -conversation1 = [ +conversation = [ { "role": "system", "content": "You are a helpful assistant" @@ -36,45 +31,40 @@ def print_outputs(outputs): }, { "role": "user", - "content": "Write a short essay about the importance of higher education.", + "content": "Write an 
essay about the importance of higher education.", }, ] - -conversations = [conversation1 for _ in range(100)] -import time - -start = time.time() -outputs = llm.chat(conversations,sampling_params=sampling_params,use_tqdm=True) +outputs = llm.chat(conversation, + sampling_params=sampling_params, + use_tqdm=False) print_outputs(outputs) -end = time.time() -print(end-start) # You can run batch inference with llm.chat API -conversation2 = [ - { - "role": "system", - "content": "You are a helpful assistant" - }, - { - "role": "user", - "content": "Hello" - }, - { - "role": "assistant", - "content": "Hello! How can I assist you today?" - }, - { - "role": "user", - "content": "Write an essay about the importance of playing video games!", - }, - ] -conversations = [conversation1, conversation2] +conversation = [ + { + "role": "system", + "content": "You are a helpful assistant" + }, + { + "role": "user", + "content": "Hello" + }, + { + "role": "assistant", + "content": "Hello! How can I assist you today?" + }, + { + "role": "user", + "content": "Write an essay about the importance of higher education.", + }, +] +conversations = [conversation for _ in range(10)] # We turn on tqdm progress bar to verify it's indeed running batch inference -# outputs = llm.chat(messages=conversations, -# sampling_params=sampling_params, -# use_tqdm=True) -# print_outputs(outputs) +outputs = llm.chat(messages=conversations, + sampling_params=sampling_params, + use_tqdm=True) +print_outputs(outputs) # A chat template can be optionally supplied. # If not, the model will use its default chat template. From f127a031e577658186fea9627a5f5cbe2d6994e5 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Fri, 17 Jan 2025 09:11:26 +0100 Subject: [PATCH 17/86] tests + md --- docs/source/models/supported_models.md | 13 +++++++++++++ tests/models/decoder_only/language/test_models.py | 4 ++++ 2 files changed, 17 insertions(+) diff --git a/docs/source/models/supported_models.md b/docs/source/models/supported_models.md index 642ef3c9655b8..1c6e3490e047d 100644 --- a/docs/source/models/supported_models.md +++ b/docs/source/models/supported_models.md @@ -40,6 +40,19 @@ If vLLM successfully returns text (for generative models) or hidden states (for Otherwise, please refer to [Adding a New Model](#new-model) for instructions on how to implement your model in vLLM. Alternatively, you can [open an issue on GitHub](https://github.com/vllm-project/vllm/issues/new/choose) to request vLLM support. +### Transformers fallback +After the merge of #11330, `vllm` can fall back to models that are available in `transformers`. This does not yet work for all models, but most decoder language models are supported, and vision language model support is planned! + +To check if the backend is `transformers`, you can simply do this: + +```python +from vllm import LLM +llm = LLM(model=..., task="generate") # Name or path of your model +print(llm.model.__class__) +``` + +If it is `TransformersModel`, then it means it's based on `transformers`!
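+
+As a minimal, illustrative sketch of the fallback in action (it assumes `kyutai/helium-1-preview-2b` has no native vLLM implementation at the time of this patch, which is why the test added below tags it as exercising the `transformers` backend):
+
+```python
+from vllm import LLM
+
+# Helium is assumed to have no native vLLM implementation here, so it
+# should be served through the TransformersModel fallback.
+llm = LLM(model="kyutai/helium-1-preview-2b", task="generate")
+
+# Expected to print the TransformersModel wrapper class.
+print(llm.model.__class__)
+```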
+ ### ModelScope To use models from [ModelScope](https://www.modelscope.cn) instead of HuggingFace Hub, set an environment variable: diff --git a/tests/models/decoder_only/language/test_models.py b/tests/models/decoder_only/language/test_models.py index 4e110366a09f3..3610f40e8edc3 100644 --- a/tests/models/decoder_only/language/test_models.py +++ b/tests/models/decoder_only/language/test_models.py @@ -51,6 +51,10 @@ pytest.param( "ehristoforu/Falcon3-MoE-2x7B-Insruct", # mixtral marks=[pytest.mark.cpu_model], + ), + pytest.param( + "kyutai/helium-1-preview-2b", # transformers backend + marks=[pytest.mark.core_model], ) ]) @pytest.mark.parametrize("dtype", ["half"]) From f127a031e577658186fea9627a5f5cbe2d6994e5 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Fri, 17 Jan 2025 09:30:08 +0100 Subject: [PATCH 18/86] test helium --- examples/offline_inference/distributed.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/offline_inference/distributed.py b/examples/offline_inference/distributed.py index 677127844ccdd..35f8e494c9815 100644 --- a/examples/offline_inference/distributed.py +++ b/examples/offline_inference/distributed.py @@ -32,7 +32,7 @@ class LLMPredictor: def __init__(self): # Create an LLM. - self.llm = LLM(model="meta-llama/Llama-2-7b-chat-hf", + self.llm = LLM(model="kyutai/helium-1-preview-2b", tensor_parallel_size=tensor_parallel_size) def __call__(self, batch: Dict[str, np.ndarray]) -> Dict[str, list]: From 9baefd2ccc6d1a70b05444f27b25fa061d7c0698 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Fri, 17 Jan 2025 11:19:45 +0100 Subject: [PATCH 19/86] fix dtype issue --- vllm/model_executor/models/transformers.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index d4ec24c437223..87f0f153e07ce 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -139,7 +139,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: self.config._attn_implementation_internal = "vllm" self.tp_plan = self.config.base_model_tp_plan - self.model = AutoModel.from_config(self.config) + self.model = AutoModel.from_config(self.config, torch_dtype=vllm_config.model_config.dtype) self.tensor_parallelize(self.model) # TODO(Isotr0py): Find a better method to parallelize VocabEmbedding @@ -151,7 +151,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: # ) self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size, - quant_config=None, + quant_config=quant_config, prefix=maybe_prefix(prefix, "lm_head")) if config.tie_word_embeddings: self.lm_head.weight = self.model.get_input_embeddings().weight @@ -162,6 +162,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: self.sampler = get_sampler() def tensor_parallelize(self, module: nn.Module, prefix: str = ""): + if self.tp_plan is None: + raise ValueError("Trying to run tensor parallelization but the model does not support it yet!") + for child_name, child_module in module.named_children(): qual_name = prefix + child_name for pattern, style in self.tp_plan.items(): From aff205a21c4304b543c4aab28194100e69c4acc1 Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Fri, 17 Jan 2025 12:49:00 +0100 Subject: [PATCH 20/86] Make model implementation configurable Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- vllm/config.py | 
16 ++++++++++++- vllm/engine/arg_utils.py | 24 +++++++++++++++---- vllm/model_executor/model_loader/utils.py | 29 ++++++++++++++++++++++- vllm/model_executor/models/registry.py | 8 +++---- 4 files changed, 66 insertions(+), 11 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index 59b509d5a961e..2cf62b03d25ba 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -80,6 +80,12 @@ def compute_hash(self) -> str: ... +class ModelImpl(str, enum.Enum): + AUTO = "auto" + VLLM = "vllm" + TRANSFORMERS = "transformers" + + class ModelConfig: """Configuration for the model. @@ -169,6 +175,12 @@ class ModelConfig: `logits_processors` extra completion argument. Defaults to None, which allows no processors. generation_config: Configuration parameter file for generation. + model_impl: Which implementation of the model to use: + "auto" will try to use the vLLM implementation if it exists and + fall back to the Transformers implementation if no vLLM + implementation is available. + "vllm" will use the vLLM model implementation. + "transformers" will use the Transformers model implementation. """ def compute_hash(self) -> str: @@ -228,7 +240,8 @@ def __init__(self, override_neuron_config: Optional[Dict[str, Any]] = None, override_pooler_config: Optional["PoolerConfig"] = None, logits_processor_pattern: Optional[str] = None, - generation_config: Optional[str] = None) -> None: + generation_config: Optional[str] = None, + model_impl: Union[str, ModelImpl] = ModelImpl.AUTO) -> None: self.model = model self.tokenizer = tokenizer self.tokenizer_mode = tokenizer_mode @@ -239,6 +252,7 @@ def __init__(self, self.code_revision = code_revision self.rope_scaling = rope_scaling self.rope_theta = rope_theta + self.model_impl = model_impl if hf_overrides is None: hf_overrides = {} diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 0850bab6bb7e1..0a4541b498138 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -11,10 +11,10 @@ from vllm.config import (CacheConfig, CompilationConfig, ConfigFormat, DecodingConfig, DeviceConfig, HfOverrides, KVTransferConfig, LoadConfig, LoadFormat, LoRAConfig, - ModelConfig, ObservabilityConfig, ParallelConfig, - PoolerConfig, PromptAdapterConfig, SchedulerConfig, - SpeculativeConfig, TaskOption, TokenizerPoolConfig, - VllmConfig) + ModelConfig, ModelImpl, ObservabilityConfig, + ParallelConfig, PoolerConfig, PromptAdapterConfig, + SchedulerConfig, SpeculativeConfig, TaskOption, + TokenizerPoolConfig, VllmConfig) from vllm.executor.executor_base import ExecutorBase from vllm.logger import init_logger from vllm.model_executor.layers.quantization import QUANTIZATION_METHODS @@ -197,6 +197,7 @@ class EngineArgs: kv_transfer_config: Optional[KVTransferConfig] = None generation_config: Optional[str] = None + model_impl: str = "auto" def __post_init__(self): if not self.tokenizer: @@ -385,6 +386,18 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: 'qualified names that can be passed with the `logits_processors` ' 'extra completion argument. 
Defaults to None, which allows no ' 'processors.') + parser.add_argument( + '--model-impl', + type=str, + default=EngineArgs.model_impl, + choices=[f.value for f in ModelImpl], + help='Which implementation of the model to use.\n\n' + '* "auto" will try to use the vLLM implementation if it ' + 'exists and fall back to the Transformers implementation if ' + 'no vLLM implementation is available.\n' + '* "vllm" will use the vLLM model implementation.\n' + '* "transformers" will use the Transformers model ' + 'implementation.\n') # Parallel arguments parser.add_argument( '--distributed-executor-backend', @@ -999,7 +1012,8 @@ def create_model_config(self) -> ModelConfig: override_neuron_config=self.override_neuron_config, override_pooler_config=self.override_pooler_config, logits_processor_pattern=self.logits_processor_pattern, - generation_config=self.generation_config) + generation_config=self.generation_config, + model_impl=self.model_impl) def create_load_config(self) -> LoadConfig: return LoadConfig( diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py index 44978a55e072d..4ac49fdb0b9ef 100644 --- a/vllm/model_executor/model_loader/utils.py +++ b/vllm/model_executor/model_loader/utils.py @@ -3,14 +3,18 @@ from typing import Tuple, Type import torch +import transformers from torch import nn -from vllm.config import ModelConfig +from vllm.config import ModelConfig, ModelImpl +from vllm.logger import init_logger from vllm.model_executor.models import ModelRegistry from vllm.model_executor.models.adapters import (as_classification_model, as_embedding_model, as_reward_model) +logger = init_logger(__name__) + @contextlib.contextmanager def set_default_torch_dtype(dtype: torch.dtype): @@ -21,6 +25,10 @@ def set_default_torch_dtype(dtype: torch.dtype): torch.set_default_dtype(old_dtype) +def is_transformers_impl_compatible(arch: str) -> bool: + return getattr(transformers, arch)._supports_flex_attn + + def get_model_architecture( model_config: ModelConfig) -> Tuple[Type[nn.Module], str]: architectures = getattr(model_config.hf_config, "architectures", []) @@ -36,6 +44,25 @@ def get_model_architecture( and "MixtralForCausalLM" in architectures): architectures = ["QuantMixtralForCausalLM"] + vllm_supported_archs = ModelRegistry.get_supported_archs() + for i, arch in enumerate(architectures): + if model_config.model_impl == ModelImpl.TRANSFORMERS: + if not is_transformers_impl_compatible: + raise ValueError( + "The Transformers implementation of %s is not compatible " + "with vLLM.", arch) + architectures[i] = "TransformersModel" + if (model_config.model_impl == ModelImpl.AUTO + and arch not in vllm_supported_archs): + if not is_transformers_impl_compatible: + raise ValueError( + "%s has no vLLM implementation and the Transformers " + "implementationis not compatible with vLLM.", arch) + logger.info( + "%s has no vLLM implementation, falling back to use " + "Transformers implementation", arch) + architectures[i] = "TransformersModel" + model_cls, arch = ModelRegistry.resolve_model_cls(architectures) if model_config.task == "embed": model_cls = as_embedding_model(model_cls) diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index a7029c817d46b..7d2d473edd3e0 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -358,13 +358,13 @@ def _raise_for_unsupported(self, architectures: List[str]): def _try_load_model_cls(self, model_arch: str) -> Optional[Type[nn.Module]]: - 
model = self.models.get(model_arch) - if model is None: - model = self.models[next(iter(_FALLBACK_MODEL))] + if model_arch not in self.models: + return None - return _try_load_model_cls(model_arch, model) + return _try_load_model_cls(model_arch, self.models[model_arch]) def _try_inspect_model_cls(self, model_arch: str) -> Optional[_ModelInfo]: + # A model will always be inspected even if it cannot be loaded model = self.models.get(model_arch) if model is None: model = self.models[next(iter(_FALLBACK_MODEL))] From 4efcac82b2188b98df5c822dfef694b6003d74d7 Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Fri, 17 Jan 2025 13:19:11 +0100 Subject: [PATCH 21/86] FIx previous commit Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- vllm/model_executor/model_loader/utils.py | 6 ++++-- vllm/model_executor/models/transformers.py | 24 ++++++++++++++-------- 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py index 4ac49fdb0b9ef..91bcf300865ca 100644 --- a/vllm/model_executor/model_loader/utils.py +++ b/vllm/model_executor/model_loader/utils.py @@ -46,15 +46,17 @@ def get_model_architecture( vllm_supported_archs = ModelRegistry.get_supported_archs() for i, arch in enumerate(architectures): + if arch == "TransformersModel": + continue if model_config.model_impl == ModelImpl.TRANSFORMERS: - if not is_transformers_impl_compatible: + if not is_transformers_impl_compatible(arch): raise ValueError( "The Transformers implementation of %s is not compatible " "with vLLM.", arch) architectures[i] = "TransformersModel" if (model_config.model_impl == ModelImpl.AUTO and arch not in vllm_supported_archs): - if not is_transformers_impl_compatible: + if not is_transformers_impl_compatible(arch): raise ValueError( "%s has no vLLM implementation and the Transformers " "implementationis not compatible with vLLM.", arch) diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index 87f0f153e07ce..48f96926f7235 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -24,6 +24,7 @@ from vllm.config import VllmConfig from vllm.distributed import get_tensor_model_parallel_world_size from vllm.distributed.utils import divide +from vllm.logger import init_logger from vllm.model_executor.layers.logits_processor import LogitsProcessor from vllm.model_executor.layers.linear import (ColumnParallelLinear, RowParallelLinear) @@ -35,6 +36,8 @@ from .utils import maybe_prefix +logger = init_logger(__name__) + class VllmKwargsForCausalLM(TypedDict, total=False): """ @@ -114,6 +117,7 @@ class TransformersModel(nn.Module): def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: super().__init__() + logger.info("Using Transformers backend.") config = vllm_config.model_config.hf_config cache_config = vllm_config.cache_config @@ -139,16 +143,16 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: self.config._attn_implementation_internal = "vllm" self.tp_plan = self.config.base_model_tp_plan - self.model = AutoModel.from_config(self.config, torch_dtype=vllm_config.model_config.dtype) + self.model = AutoModel.from_config( + self.config, torch_dtype=vllm_config.model_config.dtype) self.tensor_parallelize(self.model) - # TODO(Isotr0py): Find a better method to parallelize VocabEmbedding - # self.model.embed_tokens = VocabParallelEmbedding( - 
# self.vocab_size, - # config.hidden_size, - # org_num_embeddings=config.vocab_size, - # quant_config=None, - # ) + self.model.embed_tokens = VocabParallelEmbedding( + self.vocab_size, + config.hidden_size, + org_num_embeddings=config.vocab_size, + quant_config=None, + ) self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size, quant_config=quant_config, @@ -163,7 +167,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: def tensor_parallelize(self, module: nn.Module, prefix: str = ""): if self.tp_plan is None: - raise ValueError("Trying to run tensor parallelization but the model does not support it yet!") + raise ValueError( + "Trying to run tensor parallelization but the model does not support it yet!" + ) for child_name, child_module in module.named_children(): qual_name = prefix + child_name From 5d3afacc19ddcf4f4c367a2b89e85c33508f930e Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Fri, 17 Jan 2025 13:24:33 +0100 Subject: [PATCH 22/86] `format.sh` Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- vllm/engine/arg_utils.py | 12 ++++++------ vllm/model_executor/models/transformers.py | 13 +++++++------ 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 0a4541b498138..3a813e7ab4790 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -392,12 +392,12 @@ def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: default=EngineArgs.model_impl, choices=[f.value for f in ModelImpl], help='Which implementation of the model to use.\n\n' - '* "auto" will try to use the vLLM implementation if it ' - 'exists and fall back to the Transformers implementation if ' - 'no vLLM implementation is available.\n' - '* "vllm" will use the vLLM model implementation.\n' - '* "transformers" will use the Transformers model ' - 'implementation.\n') + '* "auto" will try to use the vLLM implementation if it exists ' + 'and fall back to the Transformers implementation if no vLLM ' + 'implementation is available.\n' + '* "vllm" will use the vLLM model implementation.\n' + '* "transformers" will use the Transformers model ' + 'implementation.\n') # Parallel arguments parser.add_argument( '--distributed-executor-backend', diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index 48f96926f7235..f432ebd41689e 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -25,11 +25,12 @@ from vllm.distributed import get_tensor_model_parallel_world_size from vllm.distributed.utils import divide from vllm.logger import init_logger -from vllm.model_executor.layers.logits_processor import LogitsProcessor from vllm.model_executor.layers.linear import (ColumnParallelLinear, RowParallelLinear) -from vllm.model_executor.layers.vocab_parallel_embedding import VocabParallelEmbedding, ParallelLMHead +from vllm.model_executor.layers.logits_processor import LogitsProcessor from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler +from vllm.model_executor.layers.vocab_parallel_embedding import ( + ParallelLMHead, VocabParallelEmbedding) from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.sequence import IntermediateTensors @@ -62,8 +63,8 @@ def vllm_flash_attention_forward(_module, **kwargs): layer_idx = 
_module.layer_idx hidden = query.shape[-2] - query, key, value = [x.transpose(1, 2) for x in (query, key, value)] - query, key, value = [x.reshape(hidden, -1) for x in (query, key, value)] + query, key, value = (x.transpose(1, 2) for x in (query, key, value)) + query, key, value = (x.reshape(hidden, -1) for x in (query, key, value)) return attention_instances[layer_idx].forward( query, key, @@ -168,8 +169,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: def tensor_parallelize(self, module: nn.Module, prefix: str = ""): if self.tp_plan is None: raise ValueError( - "Trying to run tensor parallelization but the model does not support it yet!" - ) + "Trying to run tensor parallelization but the model does not " + "support it yet!") for child_name, child_module in module.named_children(): qual_name = prefix + child_name From 20f4d485fc7a1ae3d887b8a23f8496ca63d001af Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Fri, 17 Jan 2025 13:42:27 +0100 Subject: [PATCH 23/86] Handle alternative vocab embed layer names Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- vllm/model_executor/models/transformers.py | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index f432ebd41689e..f91cdc5032280 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -148,12 +148,22 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: self.config, torch_dtype=vllm_config.model_config.dtype) self.tensor_parallelize(self.model) - self.model.embed_tokens = VocabParallelEmbedding( - self.vocab_size, - config.hidden_size, - org_num_embeddings=config.vocab_size, - quant_config=None, - ) + # Sorted by most frequently use (most frequent first) + vocab_embed_names = ( + "embed_tokens", "word_embeddings", "wte", "embed_in") + for vocab_embed_name in vocab_embed_names: + if hasattr(self.model, vocab_embed_name): + setattr( + self.model, + vocab_embed_name, + VocabParallelEmbedding( + self.vocab_size, + config.hidden_size, + org_num_embeddings=config.vocab_size, + quant_config=None, + ), + ) + break self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size, quant_config=quant_config, From 013f880687ec386e4258a3589601ee4dbb73951c Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Fri, 17 Jan 2025 15:25:56 +0100 Subject: [PATCH 24/86] Undo removel of `LlamaForCausalLM` Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- vllm/model_executor/models/registry.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index 7d2d473edd3e0..45b3fb091bc4a 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -62,7 +62,7 @@ "InternLM2VEForCausalLM": ("internlm2_ve", "InternLM2VEForCausalLM"), "JAISLMHeadModel": ("jais", "JAISLMHeadModel"), "JambaForCausalLM": ("jamba", "JambaForCausalLM"), - # "LlamaForCausalLM": ("llama", "LlamaForCausalLM"), + "LlamaForCausalLM": ("llama", "LlamaForCausalLM"), # For decapoda-research/llama-* "LLaMAForCausalLM": ("llama", "LlamaForCausalLM"), "MambaForCausalLM": ("mamba", "MambaForCausalLM"), From 19dc1f8db876d0413763373871c5d231bbf27ccb Mon Sep 17 00:00:00 2001 From: Harry Mellor 
<19981378+hmellor@users.noreply.github.com> Date: Fri, 17 Jan 2025 15:26:17 +0100 Subject: [PATCH 25/86] Add `RMSNorm` replacement Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- vllm/model_executor/models/transformers.py | 73 +++++++++++++++------- 1 file changed, 49 insertions(+), 24 deletions(-) diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index f91cdc5032280..38ae582343a75 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -17,7 +17,7 @@ import torch from torch import nn -from transformers import AutoModel +from transformers import AutoModel, PreTrainedModel from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS from vllm.attention import Attention, AttentionMetadata @@ -25,6 +25,7 @@ from vllm.distributed import get_tensor_model_parallel_world_size from vllm.distributed.utils import divide from vllm.logger import init_logger +from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.linear import (ColumnParallelLinear, RowParallelLinear) from vllm.model_executor.layers.logits_processor import LogitsProcessor @@ -127,8 +128,14 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: self.vocab_size = config.vocab_size self.unpadded_vocab_size = config.vocab_size + # MLP modifications + self.tp_plan = self.config.base_model_tp_plan + self.model: PreTrainedModel = AutoModel.from_config( + self.config, torch_dtype=vllm_config.model_config.dtype) + self.tensor_parallelize(self.model) + + # Attention modifications (assumes 1 attention op per hidden layer) tp_size = get_tensor_model_parallel_world_size() - # Assumes 1 attention operation per hidden layer self.attention_instances = [ Attention( divide(config.num_attention_heads, tp_size), @@ -142,28 +149,12 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: for i in range(config.num_hidden_layers) ] self.config._attn_implementation_internal = "vllm" + + # Model modifications + self.replace_vocab_embed_class(self.model) + self.replace_rms_norm_class(self.model) - self.tp_plan = self.config.base_model_tp_plan - self.model = AutoModel.from_config( - self.config, torch_dtype=vllm_config.model_config.dtype) - self.tensor_parallelize(self.model) - - # Sorted by most frequently use (most frequent first) - vocab_embed_names = ( - "embed_tokens", "word_embeddings", "wte", "embed_in") - for vocab_embed_name in vocab_embed_names: - if hasattr(self.model, vocab_embed_name): - setattr( - self.model, - vocab_embed_name, - VocabParallelEmbedding( - self.vocab_size, - config.hidden_size, - org_num_embeddings=config.vocab_size, - quant_config=None, - ), - ) - break + # ForCausalLM modifications self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size, quant_config=quant_config, @@ -176,6 +167,12 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: config.vocab_size, logit_scale) self.sampler = get_sampler() + def log_replacement( + self, name: str, old_module: nn.Module, new_module: nn.Module): + logger.debug("%s: %s -> %s", name, + old_module.__class__.__name__, + new_module.__class__.__name__) + def tensor_parallelize(self, module: nn.Module, prefix: str = ""): if self.tp_plan is None: raise ValueError( @@ -188,11 +185,39 @@ def tensor_parallelize(self, module: nn.Module, prefix: str = ""): if re.match(pattern, qual_name) and isinstance( child_module, nn.Linear): new_module = 
replace_tp_linear_class(child_module, style) - print(f"{qual_name}: {child_module} -> {new_module}") setattr(module, child_name, new_module) + self.log_replacement(qual_name, child_module, new_module) else: self.tensor_parallelize(child_module, prefix=f"{qual_name}.") + def replace_vocab_embed_class(self, module: nn.Module): + # Sorted by most frequently use (most frequent first) + vocab_embed_names = ( + "embed_tokens", "word_embeddings", "wte", "embed_in") + for vocab_embed_name in vocab_embed_names: + if hasattr(module, vocab_embed_name): + old_module = getattr(module, vocab_embed_name) + new_module = VocabParallelEmbedding( + self.vocab_size, + self.config.hidden_size, + org_num_embeddings=self.config.vocab_size, + quant_config=None, + ) + setattr(module, vocab_embed_name, new_module) + self.log_replacement(vocab_embed_name, old_module, new_module) + break + + def replace_rms_norm_class(self, module: nn.Module, prefix: str = ""): + for child_name, child_module in module.named_children(): + qual_name = prefix + child_name + if "RMSNorm" in child_module.__class__.__name__: + rms_norm = RMSNorm( + self.config.hidden_size, eps=self.config.rms_norm_eps) + setattr(module, child_name, rms_norm) + self.log_replacement(qual_name, child_module, rms_norm) + self.replace_rms_norm_class(child_module, prefix=f"{qual_name}.") + + def _autoset_attn_implementation( self, config, From 7b5f1468d5eb250347ce71b458faae9764935aed Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Fri, 17 Jan 2025 16:11:45 +0100 Subject: [PATCH 26/86] bnb and `SupportsLoRA` --- vllm/model_executor/models/transformers.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index f432ebd41689e..0b4fdeb66c3eb 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -34,7 +34,7 @@ from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.sequence import IntermediateTensors - +from .interfaces import SupportsLoRA from .utils import maybe_prefix logger = init_logger(__name__) @@ -113,9 +113,22 @@ def replace_tp_linear_class(orig_module: nn.Linear, style: str): raise ValueError(f"Unsupported parallel style value: {style}") -class TransformersModel(nn.Module): +class TransformersModel(nn.Module, SupportsLoRA): embedding_padding_modules = ["lm_head"] + # LoRA specific attributes + supported_lora_modules = [ + "q_proj", + "k_proj", + "v_proj", + "o_proj", + "gate_proj", + "up_proj", + "down_proj", + ] + # BitandBytes specific attributes. 
No remapping needed + bitsandbytes_stacked_params_mapping = {} + def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: super().__init__() logger.info("Using Transformers backend.") From c805f9d7a70006e6793c24e97ffb1c62310610d0 Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Fri, 17 Jan 2025 18:06:36 +0100 Subject: [PATCH 27/86] Change log Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- vllm/model_executor/model_loader/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py index 91bcf300865ca..e5f2188bb8bc7 100644 --- a/vllm/model_executor/model_loader/utils.py +++ b/vllm/model_executor/model_loader/utils.py @@ -61,7 +61,7 @@ def get_model_architecture( "%s has no vLLM implementation and the Transformers " "implementationis not compatible with vLLM.", arch) logger.info( - "%s has no vLLM implementation, falling back to use " + "%s has no vLLM implementation, falling back to " "Transformers implementation", arch) architectures[i] = "TransformersModel" From aadfb1bbc297b0d176005f43c72ee4d35f34d1e4 Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Fri, 17 Jan 2025 19:31:42 +0100 Subject: [PATCH 28/86] Formatting Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- vllm/model_executor/models/transformers.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index ae9f8eaa334bf..40f162eace645 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -35,6 +35,7 @@ from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.sequence import IntermediateTensors + from .interfaces import SupportsLoRA from .utils import maybe_prefix @@ -162,7 +163,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: for i in range(config.num_hidden_layers) ] self.config._attn_implementation_internal = "vllm" - + # Model modifications self.replace_vocab_embed_class(self.model) self.replace_rms_norm_class(self.model) @@ -205,8 +206,8 @@ def tensor_parallelize(self, module: nn.Module, prefix: str = ""): def replace_vocab_embed_class(self, module: nn.Module): # Sorted by most frequently use (most frequent first) - vocab_embed_names = ( - "embed_tokens", "word_embeddings", "wte", "embed_in") + vocab_embed_names = ("embed_tokens", "word_embeddings", "wte", + "embed_in") for vocab_embed_name in vocab_embed_names: if hasattr(module, vocab_embed_name): old_module = getattr(module, vocab_embed_name) @@ -224,13 +225,12 @@ def replace_rms_norm_class(self, module: nn.Module, prefix: str = ""): for child_name, child_module in module.named_children(): qual_name = prefix + child_name if "RMSNorm" in child_module.__class__.__name__: - rms_norm = RMSNorm( - self.config.hidden_size, eps=self.config.rms_norm_eps) + rms_norm = RMSNorm(self.config.hidden_size, + eps=self.config.rms_norm_eps) setattr(module, child_name, rms_norm) self.log_replacement(qual_name, child_module, rms_norm) self.replace_rms_norm_class(child_module, prefix=f"{qual_name}.") - def _autoset_attn_implementation( self, config, From 544ba2d4f1f93a77b5f33324bd2d68c0959bbeb6 Mon Sep 17 00:00:00 2001 From: Harry Mellor 
<19981378+hmellor@users.noreply.github.com> Date: Fri, 17 Jan 2025 19:33:10 +0100 Subject: [PATCH 29/86] Disable vLLM RMS Norm implementation for now Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- vllm/model_executor/models/transformers.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index 40f162eace645..aeb67a5874ea2 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -166,7 +166,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: # Model modifications self.replace_vocab_embed_class(self.model) - self.replace_rms_norm_class(self.model) + # TODO: solve issue with residuals being added before/in RMSNorm ops + # self.replace_rms_norm_class(self.model) # ForCausalLM modifications self.lm_head = ParallelLMHead(config.vocab_size, From 06347f8c516311ba265f62d725fe2c8f47ee7109 Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Fri, 17 Jan 2025 19:34:36 +0100 Subject: [PATCH 30/86] Only throw TP error if user is trying to use TP Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- vllm/model_executor/models/transformers.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index aeb67a5874ea2..2de412adbdd59 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -135,6 +135,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: super().__init__() logger.info("Using Transformers backend.") + self.vllm_config = vllm_config config = vllm_config.model_config.hf_config cache_config = vllm_config.cache_config quant_config = vllm_config.quant_config @@ -142,10 +143,10 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: self.vocab_size = config.vocab_size self.unpadded_vocab_size = config.vocab_size - # MLP modifications - self.tp_plan = self.config.base_model_tp_plan self.model: PreTrainedModel = AutoModel.from_config( self.config, torch_dtype=vllm_config.model_config.dtype) + + # MLP modifications self.tensor_parallelize(self.model) # Attention modifications (assumes 1 attention op per hidden layer) @@ -182,21 +183,20 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: config.vocab_size, logit_scale) self.sampler = get_sampler() - def log_replacement( - self, name: str, old_module: nn.Module, new_module: nn.Module): - logger.debug("%s: %s -> %s", name, - old_module.__class__.__name__, - new_module.__class__.__name__) + def log_replacement(self, name: str, old_module: nn.Module, + new_module: nn.Module): + logger.debug("%s: %s -> %s", name, old_module, new_module) def tensor_parallelize(self, module: nn.Module, prefix: str = ""): - if self.tp_plan is None: + if (self.config.base_model_tp_plan is None + and self.vllm_config.parallel_config.tensor_parallel_size > 1): raise ValueError( "Trying to run tensor parallelization but the model does not " "support it yet!") for child_name, child_module in module.named_children(): qual_name = prefix + child_name - for pattern, style in self.tp_plan.items(): + for pattern, style in self.config.base_model_tp_plan.items(): if re.match(pattern, qual_name) and isinstance( child_module, nn.Linear): new_module = 
replace_tp_linear_class(child_module, style) From 3fe40d1c1ba15f05f1b0d06ad42d294ea8c72c54 Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Fri, 17 Jan 2025 19:51:20 +0100 Subject: [PATCH 31/86] Add some tests for TransformersModel Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- .../decoder_only/language/test_models.py | 4 - tests/models/test_transformers.py | 99 +++++++++++++++++++ 2 files changed, 99 insertions(+), 4 deletions(-) create mode 100644 tests/models/test_transformers.py diff --git a/tests/models/decoder_only/language/test_models.py b/tests/models/decoder_only/language/test_models.py index 3610f40e8edc3..4e110366a09f3 100644 --- a/tests/models/decoder_only/language/test_models.py +++ b/tests/models/decoder_only/language/test_models.py @@ -51,10 +51,6 @@ pytest.param( "ehristoforu/Falcon3-MoE-2x7B-Insruct", # mixtral marks=[pytest.mark.cpu_model], - ), - pytest.param( - "kyutai/helium-1-preview-2b", # transformers backend - marks=[pytest.mark.core_model], ) ]) @pytest.mark.parametrize("dtype", ["half"]) diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py new file mode 100644 index 0000000000000..1795b62df7a65 --- /dev/null +++ b/tests/models/test_transformers.py @@ -0,0 +1,99 @@ +"""Test the functionality of the Transformers backend. + +Run `pytest tests/models/test_transformers.py`. +""" +from contextlib import nullcontext + +import pytest + +from vllm.model_executor.models import ModelRegistry +from .utils import check_logprobs_close + +# Delete Llama from registry so we can pretend vLLM doesn't support it +del ModelRegistry.models["LlamaForCausalLM"] + + +def check_implementation( + hf_runner, + vllm_runner, + example_prompts, + model, + **kwargs, + +): + max_tokens = 32 + num_logprobs = 5 + + with vllm_runner(model, **kwargs) as vllm_model: + vllm_outputs = vllm_model.generate_greedy_logprobs( + example_prompts, max_tokens, num_logprobs) + + with hf_runner(model) as hf_model: + hf_outputs = hf_model.generate_greedy_logprobs_limit( + example_prompts, max_tokens, num_logprobs) + + check_logprobs_close( + outputs_0_lst=hf_outputs, + outputs_1_lst=vllm_outputs, + name_0="hf", + name_1="vllm", + ) + + +@pytest.mark.parametrize( + "model,model_impl", + [ + ("openai-community/gpt2", "transformers"), + ("meta-llama/Llama-3.2-1B-Instruct", "auto") + ]) +def test_models( + hf_runner, + vllm_runner, + example_prompts, + model, + model_impl, +) -> None: + + maybe_raises = nullcontext() + if model == "openai-community/gpt2" and model_impl == "transformers": + maybe_raises = pytest.raises( + ValueError, + match="The Transformers implementation.*not compatible with vLLM") + + with maybe_raises: + check_implementation( + hf_runner, + vllm_runner, + example_prompts, + model, + model_impl=model_impl) + + +def test_distributed( + hf_runner, + vllm_runner, + example_prompts, +): + kwargs = {"model_impl": "transformers", "tensor_parallel_size": 2} + check_implementation( + hf_runner, + vllm_runner, + example_prompts, + "meta-llama/Llama-3.2-1B-Instruct", + **kwargs + ) + + +def test_quantized( + hf_runner, + vllm_runner, + example_prompts, +): + kwargs = {"model_impl": "transformers"} + check_implementation( + hf_runner, + vllm_runner, + example_prompts, + "unsloth/Llama-3.2-1B-Instruct-bnb-4bit", + **kwargs + ) From d37fd9b715fb10d92945513851ee4f5b7a5e4845 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Mon, 20 Jan 2025 10:50:24 +0100 Subject: [PATCH 32/86] remove replace norm, cleanup 
--- .../lora_with_quantization_inference.py | 7 ++++-- vllm/model_executor/models/transformers.py | 25 +------------------ 2 files changed, 6 insertions(+), 26 deletions(-) diff --git a/examples/offline_inference/lora_with_quantization_inference.py b/examples/offline_inference/lora_with_quantization_inference.py index 0c454ea50f665..00efef912f02a 100644 --- a/examples/offline_inference/lora_with_quantization_inference.py +++ b/examples/offline_inference/lora_with_quantization_inference.py @@ -14,6 +14,7 @@ from vllm import EngineArgs, LLMEngine, RequestOutput, SamplingParams from vllm.lora.request import LoRARequest +MODEL_IMPL = "transformers" def create_test_prompts( lora_path: str @@ -84,12 +85,14 @@ def initialize_engine(model: str, quantization: str, qlora_adapter_name_or_path=lora_repo, load_format="bitsandbytes", enable_lora=True, - max_lora_rank=64) + max_lora_rank=64, + model_impl=MODEL_IMPL) else: engine_args = EngineArgs(model=model, quantization=quantization, enable_lora=True, - max_loras=4) + max_loras=4, + model_impl=MODEL_IMPL) return LLMEngine.from_engine_args(engine_args) diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index ae9f8eaa334bf..433c36dcb4bd2 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -117,19 +117,7 @@ def replace_tp_linear_class(orig_module: nn.Linear, style: str): class TransformersModel(nn.Module, SupportsLoRA): embedding_padding_modules = ["lm_head"] - # LoRA specific attributes - supported_lora_modules = [ - "q_proj", - "k_proj", - "v_proj", - "o_proj", - "gate_proj", - "up_proj", - "down_proj", - ] - # BitandBytes specific attributes. No remapping needed - bitsandbytes_stacked_params_mapping = {} - + # TODO Add support for bnb and LORA def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: super().__init__() logger.info("Using Transformers backend.") @@ -165,7 +153,6 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: # Model modifications self.replace_vocab_embed_class(self.model) - self.replace_rms_norm_class(self.model) # ForCausalLM modifications self.lm_head = ParallelLMHead(config.vocab_size, @@ -220,16 +207,6 @@ def replace_vocab_embed_class(self, module: nn.Module): self.log_replacement(vocab_embed_name, old_module, new_module) break - def replace_rms_norm_class(self, module: nn.Module, prefix: str = ""): - for child_name, child_module in module.named_children(): - qual_name = prefix + child_name - if "RMSNorm" in child_module.__class__.__name__: - rms_norm = RMSNorm( - self.config.hidden_size, eps=self.config.rms_norm_eps) - setattr(module, child_name, rms_norm) - self.log_replacement(qual_name, child_module, rms_norm) - self.replace_rms_norm_class(child_module, prefix=f"{qual_name}.") - def _autoset_attn_implementation( self, From 4cbea3229d2d4e41e79319e3cdb74d8b5febf518 Mon Sep 17 00:00:00 2001 From: Isotr0py <2037008807@qq.com> Date: Mon, 20 Jan 2025 18:32:17 +0800 Subject: [PATCH 33/86] linting and test mark Signed-off-by: Isotr0py <2037008807@qq.com> --- tests/models/test_transformers.py | 56 +++++++++++++------------------ 1 file changed, 23 insertions(+), 33 deletions(-) diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index 1795b62df7a65..a20df6b4478ac 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -3,23 +3,26 @@ Run `pytest tests/models/test_transformers.py`. 
""" from contextlib import nullcontext +from typing import Type import pytest from vllm.model_executor.models import ModelRegistry + +from ..conftest import HfRunner, VllmRunner from .utils import check_logprobs_close +from ..utils import multi_gpu_marks # Delete Llama from registry so we can pretend vLLM doesn't support it del ModelRegistry.models["LlamaForCausalLM"] def check_implementation( - hf_runner, - vllm_runner, - example_prompts, - model, - **kwargs, - + hf_runner: Type[HfRunner], + vllm_runner: Type[VllmRunner], + example_prompts: list[str], + model: str, + **kwargs, ): max_tokens = 32 num_logprobs = 5 @@ -40,12 +43,9 @@ def check_implementation( ) -@pytest.mark.parametrize( - "model,model_impl", - [ - ("openai-community/gpt2", "transformers"), - ("meta-llama/Llama-3.2-1B-Instruct", "auto") - ]) +@pytest.mark.parametrize("model,model_impl", + [("openai-community/gpt2", "transformers"), + ("meta-llama/Llama-3.2-1B-Instruct", "auto")]) def test_models( hf_runner, vllm_runner, @@ -61,27 +61,22 @@ def test_models( match="The Transformers implementation.*not compatible with vLLM") with maybe_raises: - check_implementation( - hf_runner, - vllm_runner, - example_prompts, - model, - model_impl=model_impl) - + check_implementation(hf_runner, + vllm_runner, + example_prompts, + model, + model_impl=model_impl) + +@multi_gpu_marks(num_gpus=2) def test_distributed( hf_runner, vllm_runner, example_prompts, ): kwargs = {"model_impl": "transformers", "tensor_parallel_size": 2} - check_implementation( - hf_runner, - vllm_runner, - example_prompts, - "meta-llama/Llama-3.2-1B-Instruct", - **kwargs - ) + check_implementation(hf_runner, vllm_runner, example_prompts, + "meta-llama/Llama-3.2-1B-Instruct", **kwargs) def test_quantized( @@ -90,10 +85,5 @@ def test_quantized( example_prompts, ): kwargs = {"model_impl": "transformers"} - check_implementation( - hf_runner, - vllm_runner, - example_prompts, - "unsloth/Llama-3.2-1B-Instruct-bnb-4bit", - **kwargs - ) + check_implementation(hf_runner, vllm_runner, example_prompts, + "unsloth/Llama-3.2-1B-Instruct-bnb-4bit", **kwargs) From 96f0a3a79e86e7bdbd1a4c87869ec6ac04669cd1 Mon Sep 17 00:00:00 2001 From: Isotr0py <2037008807@qq.com> Date: Mon, 20 Jan 2025 18:35:33 +0800 Subject: [PATCH 34/86] revert example modification Signed-off-by: Isotr0py <2037008807@qq.com> --- examples/offline_inference/distributed.py | 2 +- .../offline_inference/lora_with_quantization_inference.py | 7 ++----- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/examples/offline_inference/distributed.py b/examples/offline_inference/distributed.py index 35f8e494c9815..677127844ccdd 100644 --- a/examples/offline_inference/distributed.py +++ b/examples/offline_inference/distributed.py @@ -32,7 +32,7 @@ class LLMPredictor: def __init__(self): # Create an LLM. 
- self.llm = LLM(model="kyutai/helium-1-preview-2b", + self.llm = LLM(model="meta-llama/Llama-2-7b-chat-hf", tensor_parallel_size=tensor_parallel_size) def __call__(self, batch: Dict[str, np.ndarray]) -> Dict[str, list]: diff --git a/examples/offline_inference/lora_with_quantization_inference.py b/examples/offline_inference/lora_with_quantization_inference.py index 00efef912f02a..0c454ea50f665 100644 --- a/examples/offline_inference/lora_with_quantization_inference.py +++ b/examples/offline_inference/lora_with_quantization_inference.py @@ -14,7 +14,6 @@ from vllm import EngineArgs, LLMEngine, RequestOutput, SamplingParams from vllm.lora.request import LoRARequest -MODEL_IMPL = "transformers" def create_test_prompts( lora_path: str @@ -85,14 +84,12 @@ def initialize_engine(model: str, quantization: str, qlora_adapter_name_or_path=lora_repo, load_format="bitsandbytes", enable_lora=True, - max_lora_rank=64, - model_impl=MODEL_IMPL) + max_lora_rank=64) else: engine_args = EngineArgs(model=model, quantization=quantization, enable_lora=True, - max_loras=4, - model_impl=MODEL_IMPL) + max_loras=4) return LLMEngine.from_engine_args(engine_args) From 91e60379dfd9e2de60ea1863ad5ef42db8638fbc Mon Sep 17 00:00:00 2001 From: Isotr0py <2037008807@qq.com> Date: Mon, 20 Jan 2025 20:05:55 +0800 Subject: [PATCH 35/86] fix wrong llm.model Signed-off-by: Isotr0py <2037008807@qq.com> --- docs/source/models/supported_models.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/models/supported_models.md b/docs/source/models/supported_models.md index 1c6e3490e047d..e50acd3d56cd4 100644 --- a/docs/source/models/supported_models.md +++ b/docs/source/models/supported_models.md @@ -48,7 +48,7 @@ To check if the backend is `transformers`, you can simply do this: ```python from vllm import LLM llm = LLM(model=..., task="generate") # Name or path of your model -print(llm.model.__class__) +print(llm.llm_engine.model_executor.driver_worker.model_runner.model.__class__) ``` If it is `TransformersModel` then it means it's based on `transformers`! From 554df590dead9b358ed06bc6e42babda32411fd5 Mon Sep 17 00:00:00 2001 From: Isotr0py <2037008807@qq.com> Date: Mon, 20 Jan 2025 20:44:35 +0800 Subject: [PATCH 36/86] use apply_model Signed-off-by: Isotr0py <2037008807@qq.com> --- docs/source/models/supported_models.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/models/supported_models.md b/docs/source/models/supported_models.md index b41b469ce5d1f..ba7480208f8c1 100644 --- a/docs/source/models/supported_models.md +++ b/docs/source/models/supported_models.md @@ -48,7 +48,7 @@ To check if the backend is `transformers`, you can simply do this: ```python from vllm import LLM llm = LLM(model=..., task="generate") # Name or path of your model -print(llm.llm_engine.model_executor.driver_worker.model_runner.model.__class__) +llm.apply_model(lambda model: print(model.__class__)) ``` If it is `TransformersModel` then it means it's based on `transformers`! 
From 319cf9763b1e3c828f8b476dfcd01c66ac95f099 Mon Sep 17 00:00:00 2001
From: Arthur <48595927+ArthurZucker@users.noreply.github.com>
Date: Mon, 20 Jan 2025 14:23:23 +0100
Subject: [PATCH 37/86] Update docs/source/models/supported_models.md

Co-authored-by: Cyrus Leung
---
 docs/source/models/supported_models.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/source/models/supported_models.md b/docs/source/models/supported_models.md
index ba7480208f8c1..fb2bef0a10709 100644
--- a/docs/source/models/supported_models.md
+++ b/docs/source/models/supported_models.md
@@ -41,7 +41,7 @@ Otherwise, please refer to [Adding a New Model](#new-model) for instructions on
 Alternatively, you can [open an issue on GitHub](https://github.com/vllm-project/vllm/issues/new/choose) to request vLLM support.

 ### Transformers fallback
-After the merge of #11330, `vllm` can fallback to models that are avaialble in `transformers`. This does not work for all models for now, but most decoder language models are supported, and vision language model support is planned!
+After the merge of <gh-pr:11330>, `vllm` can fall back to models that are available in `transformers`. This does not yet work for all models, but most decoder language models are supported, and vision language model support is planned!

 To check if the backend is `transformers`, you can simply do this:

From d346637b6dd8eaa4d715a01e869b1beb403699d8 Mon Sep 17 00:00:00 2001
From: Arthur <48595927+ArthurZucker@users.noreply.github.com>
Date: Thu, 23 Jan 2025 11:27:31 +0100
Subject: [PATCH 38/86] Update docs/source/models/supported_models.md

Co-authored-by: Cyrus Leung
---
 docs/source/models/supported_models.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/source/models/supported_models.md b/docs/source/models/supported_models.md
index fb2bef0a10709..01dcac1a1a8c1 100644
--- a/docs/source/models/supported_models.md
+++ b/docs/source/models/supported_models.md
@@ -41,6 +41,7 @@ Otherwise, please refer to [Adding a New Model](#new-model) for instructions on
 Alternatively, you can [open an issue on GitHub](https://github.com/vllm-project/vllm/issues/new/choose) to request vLLM support.

 ### Transformers fallback
+
 After the merge of <gh-pr:11330>, `vllm` can fall back to models that are available in `transformers`. This does not yet work for all models, but most decoder language models are supported, and vision language model support is planned!
To check if the backend is `transformers`, you can simply do this: From 0f15f097e209d099fa18a1c81e24c86b2a1b71d5 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Fri, 24 Jan 2025 15:56:05 +0100 Subject: [PATCH 39/86] move the check to normalized arch --- vllm/model_executor/model_loader/utils.py | 7 ++++++- vllm/model_executor/models/registry.py | 8 +++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py index e5f2188bb8bc7..bac5062c7d804 100644 --- a/vllm/model_executor/model_loader/utils.py +++ b/vllm/model_executor/model_loader/utils.py @@ -26,7 +26,12 @@ def set_default_torch_dtype(dtype: torch.dtype): def is_transformers_impl_compatible(arch: str) -> bool: - return getattr(transformers, arch)._supports_flex_attn + arch = getattr(transformers, arch) + if hasattr(arch, "supports_backend"): + return arch.is_backend_compatible() + else: + return arch._supports_flex_attn + def get_model_architecture( diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index 45b3fb091bc4a..a7b057c23df73 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -380,7 +380,13 @@ def _normalize_archs( if not architectures: logger.warning("No model architectures are specified") - return architectures + normalized_arch = [] + for model in architectures: + model = self.models.get(model) + if model is None: + model = self.models[next(iter(_FALLBACK_MODEL))] + normalized_arch.append(model) + return normalized_arch def inspect_model_cls( self, From 2a4fc4f10b5a9d7549c330a30124cef1f4cbb44d Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Fri, 24 Jan 2025 16:26:13 +0100 Subject: [PATCH 40/86] fix --- vllm/config.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index bb7950519f101..c42df97a50d21 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -243,8 +243,8 @@ def __init__( logits_processor_pattern: Optional[str] = None, generation_config: Optional[str] = None, enable_sleep_mode: bool = False, - model_impl: Union[str, ModelImpl] = ModelImpl.AUTO) -> None: - ) -> None: + model_impl: Union[str, ModelImpl] = ModelImpl.AUTO + ) -> None: self.model = model self.tokenizer = tokenizer self.tokenizer_mode = tokenizer_mode From ceabb519bf9546b977783a5c54a08e4bb35d0d14 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Tue, 28 Jan 2025 07:54:35 +0100 Subject: [PATCH 41/86] revert try inspect changes --- vllm/model_executor/models/registry.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index 94bfb71c2f51c..7cece4c308c8b 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -367,12 +367,10 @@ def _try_load_model_cls(self, return _try_load_model_cls(model_arch, self.models[model_arch]) def _try_inspect_model_cls(self, model_arch: str) -> Optional[_ModelInfo]: - # A model will always be inspected even if it cannot be loaded - model = self.models.get(model_arch) - if model is None: - model = self.models[next(iter(_FALLBACK_MODEL))] + if model_arch not in self.models: + return None - return _try_inspect_model_cls(model_arch, model) + return _try_inspect_model_cls(model_arch, self.models[model_arch]) def _normalize_archs( self, From 50b218a75541a4b34150ce2976d68823337e0597 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Tue, 28 Jan 
2025 09:22:34 +0100 Subject: [PATCH 42/86] Update test --- tests/models/test_transformers.py | 39 ++++++++++++++++++++++--------- 1 file changed, 28 insertions(+), 11 deletions(-) diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index a20df6b4478ac..5e4acdc6828a1 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -6,16 +6,32 @@ from typing import Type import pytest +from transformers import AutoConfig, AutoModel, LlamaConfig, LlamaModel from vllm.model_executor.models import ModelRegistry from ..conftest import HfRunner, VllmRunner -from .utils import check_logprobs_close from ..utils import multi_gpu_marks +from .utils import check_logprobs_close # Delete Llama from registry so we can pretend vLLM doesn't support it del ModelRegistry.models["LlamaForCausalLM"] +# Code used to generate the ilama model: +# class IlamaConfig(LlamaConfig): +# model_type = "iiama" + +# class IlamaModel(LlamaModel): +# config_class = IlamaConfig + +# AutoConfig.register("iiama", IlamaConfig) +# AutoModel.register(IlamaConfig, IlamaModel) + +# base_model = LlamaModel.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype="auto") +# remote_model = IlamaModel._from_config(base_model.config) +# remote_model.load_state_dict(base_model.state_dict()) +# remote_model.push_to_hub("ArthurZ/Ilama-3.2-1B") + def check_implementation( hf_runner: Type[HfRunner], @@ -31,7 +47,7 @@ def check_implementation( vllm_outputs = vllm_model.generate_greedy_logprobs( example_prompts, max_tokens, num_logprobs) - with hf_runner(model) as hf_model: + with hf_runner(model, **kwargs) as hf_model: hf_outputs = hf_model.generate_greedy_logprobs_limit( example_prompts, max_tokens, num_logprobs) @@ -45,14 +61,14 @@ def check_implementation( @pytest.mark.parametrize("model,model_impl", [("openai-community/gpt2", "transformers"), - ("meta-llama/Llama-3.2-1B-Instruct", "auto")]) -def test_models( - hf_runner, - vllm_runner, - example_prompts, - model, - model_impl, -) -> None: + ("meta-llama/Llama-3.2-1B-Instruct", "auto"), + ("ArthurZ/Ilama-3.2-1B", "auto", True)]) +def test_models(hf_runner, + vllm_runner, + example_prompts, + model, + model_impl, + trust_remote_code=None) -> None: maybe_raises = nullcontext() if model == "openai-community/gpt2" and model_impl == "transformers": @@ -65,7 +81,8 @@ def test_models( vllm_runner, example_prompts, model, - model_impl=model_impl) + model_impl=model_impl, + trust_remote_code=trust_remote_code) @multi_gpu_marks(num_gpus=2) From c8aac878e3928cd1815261e3a7e1d1e026b182ec Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Tue, 28 Jan 2025 09:23:15 +0100 Subject: [PATCH 43/86] style --- vllm/config.py | 74 +++++++++++------------ vllm/model_executor/model_loader/utils.py | 1 - vllm/model_executor/models/registry.py | 2 +- 3 files changed, 37 insertions(+), 40 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index c42df97a50d21..40ec4377a3fee 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -207,44 +207,42 @@ def compute_hash(self) -> str: factors.append(self.rope_theta) return hashlib.sha256(str(factors).encode()).hexdigest() - def __init__( - self, - model: str, - task: Union[TaskOption, Literal["draft"]], - tokenizer: str, - tokenizer_mode: str, - trust_remote_code: bool, - dtype: Union[str, torch.dtype], - seed: int, - allowed_local_media_path: str = "", - revision: Optional[str] = None, - code_revision: Optional[str] = None, - rope_scaling: Optional[Dict[str, Any]] = None, - rope_theta: Optional[float] = None, - 
tokenizer_revision: Optional[str] = None, - max_model_len: Optional[int] = None, - spec_target_max_model_len: Optional[int] = None, - quantization: Optional[str] = None, - quantization_param_path: Optional[str] = None, - enforce_eager: Optional[bool] = None, - max_seq_len_to_capture: Optional[int] = None, - max_logprobs: int = 20, - disable_sliding_window: bool = False, - skip_tokenizer_init: bool = False, - served_model_name: Optional[Union[str, List[str]]] = None, - limit_mm_per_prompt: Optional[Mapping[str, int]] = None, - use_async_output_proc: bool = True, - config_format: ConfigFormat = ConfigFormat.AUTO, - hf_overrides: Optional[HfOverrides] = None, - mm_processor_kwargs: Optional[Dict[str, Any]] = None, - disable_mm_preprocessor_cache: bool = False, - override_neuron_config: Optional[Dict[str, Any]] = None, - override_pooler_config: Optional["PoolerConfig"] = None, - logits_processor_pattern: Optional[str] = None, - generation_config: Optional[str] = None, - enable_sleep_mode: bool = False, - model_impl: Union[str, ModelImpl] = ModelImpl.AUTO - ) -> None: + def __init__(self, + model: str, + task: Union[TaskOption, Literal["draft"]], + tokenizer: str, + tokenizer_mode: str, + trust_remote_code: bool, + dtype: Union[str, torch.dtype], + seed: int, + allowed_local_media_path: str = "", + revision: Optional[str] = None, + code_revision: Optional[str] = None, + rope_scaling: Optional[Dict[str, Any]] = None, + rope_theta: Optional[float] = None, + tokenizer_revision: Optional[str] = None, + max_model_len: Optional[int] = None, + spec_target_max_model_len: Optional[int] = None, + quantization: Optional[str] = None, + quantization_param_path: Optional[str] = None, + enforce_eager: Optional[bool] = None, + max_seq_len_to_capture: Optional[int] = None, + max_logprobs: int = 20, + disable_sliding_window: bool = False, + skip_tokenizer_init: bool = False, + served_model_name: Optional[Union[str, List[str]]] = None, + limit_mm_per_prompt: Optional[Mapping[str, int]] = None, + use_async_output_proc: bool = True, + config_format: ConfigFormat = ConfigFormat.AUTO, + hf_overrides: Optional[HfOverrides] = None, + mm_processor_kwargs: Optional[Dict[str, Any]] = None, + disable_mm_preprocessor_cache: bool = False, + override_neuron_config: Optional[Dict[str, Any]] = None, + override_pooler_config: Optional["PoolerConfig"] = None, + logits_processor_pattern: Optional[str] = None, + generation_config: Optional[str] = None, + enable_sleep_mode: bool = False, + model_impl: Union[str, ModelImpl] = ModelImpl.AUTO) -> None: self.model = model self.tokenizer = tokenizer self.tokenizer_mode = tokenizer_mode diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py index ac6c4c180b06f..b8b7c0eb852b3 100644 --- a/vllm/model_executor/model_loader/utils.py +++ b/vllm/model_executor/model_loader/utils.py @@ -34,7 +34,6 @@ def is_transformers_impl_compatible(arch: str) -> bool: return arch._supports_flex_attn - def get_model_architecture( model_config: ModelConfig) -> Tuple[Type[nn.Module], str]: architectures = getattr(model_config.hf_config, "architectures", []) diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index 7cece4c308c8b..da871a35ffd89 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -387,7 +387,7 @@ def _normalize_archs( if model is None: model = self.models[next(iter(_FALLBACK_MODEL))] normalized_arch.append(model) - return normalized_arch + return normalized_arch def 
inspect_model_cls( self, From 1896af7b6b0f7a79744319aa62fb321b12886cf8 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Tue, 28 Jan 2025 10:05:01 +0100 Subject: [PATCH 44/86] style update --- tests/models/test_transformers.py | 3 ++- vllm/model_executor/model_loader/utils.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index 5e4acdc6828a1..165fbac597b03 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -6,7 +6,6 @@ from typing import Type import pytest -from transformers import AutoConfig, AutoModel, LlamaConfig, LlamaModel from vllm.model_executor.models import ModelRegistry @@ -18,6 +17,8 @@ del ModelRegistry.models["LlamaForCausalLM"] # Code used to generate the ilama model: +# from transformers import AutoConfig, AutoModel, LlamaConfig, LlamaModel +# # class IlamaConfig(LlamaConfig): # model_type = "iiama" diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py index b8b7c0eb852b3..5df780bcbeee0 100644 --- a/vllm/model_executor/model_loader/utils.py +++ b/vllm/model_executor/model_loader/utils.py @@ -27,7 +27,7 @@ def set_default_torch_dtype(dtype: torch.dtype): def is_transformers_impl_compatible(arch: str) -> bool: - arch = getattr(transformers, arch) + arch: transformers.PreTrainedModel = getattr(transformers, arch) if hasattr(arch, "supports_backend"): return arch.is_backend_compatible() else: From 869934a289509cc01e039fd3de014a816d8bd491 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Tue, 28 Jan 2025 10:23:53 +0100 Subject: [PATCH 45/86] fix normalize arch --- vllm/model_executor/models/registry.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index 17987aad0669c..f29d055574011 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -383,9 +383,8 @@ def _normalize_archs( normalized_arch = [] for model in architectures: - model = self.models.get(model) - if model is None: - model = self.models[next(iter(_FALLBACK_MODEL))] + if not hasattr(self.models, model): + model = "TransformersModel" normalized_arch.append(model) return normalized_arch From df1c8b29c830964ef344cc0b979a406bdf1cc7e2 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Tue, 28 Jan 2025 11:16:38 +0100 Subject: [PATCH 46/86] update test, fix gpu marker and remove trust remote as it's True by default --- tests/models/test_transformers.py | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index 165fbac597b03..86b07c2b97ecf 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -10,7 +10,7 @@ from vllm.model_executor.models import ModelRegistry from ..conftest import HfRunner, VllmRunner -from ..utils import multi_gpu_marks +from ..utils import multi_gpu_test from .utils import check_logprobs_close # Delete Llama from registry so we can pretend vLLM doesn't support it @@ -60,16 +60,13 @@ def check_implementation( ) -@pytest.mark.parametrize("model,model_impl", - [("openai-community/gpt2", "transformers"), - ("meta-llama/Llama-3.2-1B-Instruct", "auto"), - ("ArthurZ/Ilama-3.2-1B", "auto", True)]) -def test_models(hf_runner, - vllm_runner, - example_prompts, - model, - model_impl, - trust_remote_code=None) -> None: +@pytest.mark.parametrize( + 
"model,model_impl", + [("openai-community/gpt2", "transformers"), + ("meta-llama/Llama-3.2-1B-Instruct", "auto"), + ("ArthurZ/Ilama-3.2-1B", "auto")]) # trust_remote_code=True by default +def test_models(hf_runner, vllm_runner, example_prompts, model, + model_impl) -> None: maybe_raises = nullcontext() if model == "openai-community/gpt2" and model_impl == "transformers": @@ -82,11 +79,10 @@ def test_models(hf_runner, vllm_runner, example_prompts, model, - model_impl=model_impl, - trust_remote_code=trust_remote_code) + model_impl=model_impl) -@multi_gpu_marks(num_gpus=2) +@multi_gpu_test(num_gpus=2) def test_distributed( hf_runner, vllm_runner, From ffd6dceef71b18592d4293591c3255825b14d3c7 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Tue, 28 Jan 2025 11:29:18 +0100 Subject: [PATCH 47/86] update test --- tests/models/test_transformers.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index 86b07c2b97ecf..842ce5d195b8b 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -48,7 +48,7 @@ def check_implementation( vllm_outputs = vllm_model.generate_greedy_logprobs( example_prompts, max_tokens, num_logprobs) - with hf_runner(model, **kwargs) as hf_model: + with hf_runner(model, trust_remote_code=True) as hf_model: hf_outputs = hf_model.generate_greedy_logprobs_limit( example_prompts, max_tokens, num_logprobs) @@ -62,9 +62,12 @@ def check_implementation( @pytest.mark.parametrize( "model,model_impl", - [("openai-community/gpt2", "transformers"), - ("meta-llama/Llama-3.2-1B-Instruct", "auto"), - ("ArthurZ/Ilama-3.2-1B", "auto")]) # trust_remote_code=True by default + [ + ("openai-community/gpt2", "transformers"), + ("ArthurZ/Ilama-3.2-1B", "auto"), + ("meta-llama/Llama-3.2-1B-Instruct", "auto"), + ] + ) # trust_remote_code=True by default def test_models(hf_runner, vllm_runner, example_prompts, model, model_impl) -> None: From 970428779176407b365ef45bc5cfa1b214d7042f Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Tue, 28 Jan 2025 14:34:50 +0100 Subject: [PATCH 48/86] for now use `model_config.hf_config.auto_map["AutoModel"]` --- tests/models/test_transformers.py | 28 ++++------------------- vllm/model_executor/model_loader/utils.py | 22 +++++++++++------- 2 files changed, 19 insertions(+), 31 deletions(-) diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index 842ce5d195b8b..ca66178cb3028 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -16,23 +16,6 @@ # Delete Llama from registry so we can pretend vLLM doesn't support it del ModelRegistry.models["LlamaForCausalLM"] -# Code used to generate the ilama model: -# from transformers import AutoConfig, AutoModel, LlamaConfig, LlamaModel -# -# class IlamaConfig(LlamaConfig): -# model_type = "iiama" - -# class IlamaModel(LlamaModel): -# config_class = IlamaConfig - -# AutoConfig.register("iiama", IlamaConfig) -# AutoModel.register(IlamaConfig, IlamaModel) - -# base_model = LlamaModel.from_pretrained("meta-llama/Llama-3.2-1B", torch_dtype="auto") -# remote_model = IlamaModel._from_config(base_model.config) -# remote_model.load_state_dict(base_model.state_dict()) -# remote_model.push_to_hub("ArthurZ/Ilama-3.2-1B") - def check_implementation( hf_runner: Type[HfRunner], @@ -62,12 +45,11 @@ def check_implementation( @pytest.mark.parametrize( "model,model_impl", - [ - ("openai-community/gpt2", "transformers"), - ("ArthurZ/Ilama-3.2-1B", "auto"), 
- ("meta-llama/Llama-3.2-1B-Instruct", "auto"), - ] - ) # trust_remote_code=True by default + [ + ("openai-community/gpt2", "transformers"), + ("ArthurZ/Ilama-3.2-1B", "auto"), # CUSTOM CODE + ("meta-llama/Llama-3.2-1B-Instruct", "auto"), + ]) # trust_remote_code=True by default def test_models(hf_runner, vllm_runner, example_prompts, model, model_impl) -> None: diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py index 5df780bcbeee0..927fdfb7bb628 100644 --- a/vllm/model_executor/model_loader/utils.py +++ b/vllm/model_executor/model_loader/utils.py @@ -26,12 +26,13 @@ def set_default_torch_dtype(dtype: torch.dtype): torch.set_default_dtype(old_dtype) -def is_transformers_impl_compatible(arch: str) -> bool: - arch: transformers.PreTrainedModel = getattr(transformers, arch) - if hasattr(arch, "supports_backend"): - return arch.is_backend_compatible() +def is_transformers_impl_compatible(arch: str, module=None) -> bool: + if module is None: + module: transformers.PreTrainedModel = getattr(transformers, arch) + if hasattr(module, "supports_backend"): + return module.is_backend_compatible() else: - return arch._supports_flex_attn + return module._supports_flex_attn def get_model_architecture( @@ -53,15 +54,20 @@ def get_model_architecture( for i, arch in enumerate(architectures): if arch == "TransformersModel": continue + custom_module = None + if hasattr(model_config.hf_config, "auto_map"): + custom_module = vllm_supported_archs[arch] + custom_module = transformers.dynamic_module_utils.get_class_in_module( + arch, model_config.hf_config.auto_map["AutoModel"]) if model_config.model_impl == ModelImpl.TRANSFORMERS: - if not is_transformers_impl_compatible(arch): + if not is_transformers_impl_compatible(arch, custom_module): raise ValueError( "The Transformers implementation of %s is not compatible " "with vLLM.", arch) architectures[i] = "TransformersModel" if (model_config.model_impl == ModelImpl.AUTO - and arch not in vllm_supported_archs): - if not is_transformers_impl_compatible(arch): + and arch not in vllm_supported_archs): + if not is_transformers_impl_compatible(arch, custom_module): raise ValueError( "%s has no vLLM implementation and the Transformers " "implementationis not compatible with vLLM.", arch) From 9a871af6c037fcb9fa7050999ee9682cae09a70a Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Tue, 28 Jan 2025 15:11:51 +0100 Subject: [PATCH 49/86] fix remote models --- tests/models/test_transformers.py | 2 +- vllm/model_executor/model_loader/utils.py | 8 ++++---- vllm/model_executor/models/transformers.py | 4 +++- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index ca66178cb3028..7351f1fb85fa3 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -46,8 +46,8 @@ def check_implementation( @pytest.mark.parametrize( "model,model_impl", [ - ("openai-community/gpt2", "transformers"), ("ArthurZ/Ilama-3.2-1B", "auto"), # CUSTOM CODE + ("openai-community/gpt2", "transformers"), ("meta-llama/Llama-3.2-1B-Instruct", "auto"), ]) # trust_remote_code=True by default def test_models(hf_runner, vllm_runner, example_prompts, model, diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py index 927fdfb7bb628..de612f143079a 100644 --- a/vllm/model_executor/model_loader/utils.py +++ b/vllm/model_executor/model_loader/utils.py @@ -56,9 +56,9 @@ def get_model_architecture( continue custom_module 
= None if hasattr(model_config.hf_config, "auto_map"): - custom_module = vllm_supported_archs[arch] - custom_module = transformers.dynamic_module_utils.get_class_in_module( - arch, model_config.hf_config.auto_map["AutoModel"]) + custom_module = transformers.dynamic_module_utils.get_class_from_dynamic_module( + model_config.hf_config.auto_map["AutoModel"], + model_config.model) if model_config.model_impl == ModelImpl.TRANSFORMERS: if not is_transformers_impl_compatible(arch, custom_module): raise ValueError( @@ -66,7 +66,7 @@ def get_model_architecture( "with vLLM.", arch) architectures[i] = "TransformersModel" if (model_config.model_impl == ModelImpl.AUTO - and arch not in vllm_supported_archs): + and arch not in vllm_supported_archs): if not is_transformers_impl_compatible(arch, custom_module): raise ValueError( "%s has no vLLM implementation and the Transformers " diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index 2ae6c10d15ab3..a37d1ea7c6f02 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -132,7 +132,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: self.unpadded_vocab_size = config.vocab_size self.model: PreTrainedModel = AutoModel.from_config( - self.config, torch_dtype=vllm_config.model_config.dtype) + self.config, + torch_dtype=vllm_config.model_config.dtype, + trust_remote_code=vllm_config.model_config.trust_remote_code) # MLP modifications self.tensor_parallelize(self.model) From 4f33ff8158e7d024d549a1f6f5a33c57a3dbbba2 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Tue, 28 Jan 2025 15:15:58 +0100 Subject: [PATCH 50/86] nits --- vllm/model_executor/models/transformers.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index a37d1ea7c6f02..891d90befb8b0 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -157,8 +157,6 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: # Model modifications self.replace_vocab_embed_class(self.model) - # TODO: solve issue with residuals being added before/in RMSNorm ops - # self.replace_rms_norm_class(self.model) # ForCausalLM modifications self.lm_head = ParallelLMHead(config.vocab_size, From fc6a7e964d19567647951266c36fe440979bfb84 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Tue, 28 Jan 2025 15:19:19 +0100 Subject: [PATCH 51/86] remove unused kwarg class --- vllm/model_executor/models/transformers.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index 891d90befb8b0..6f17a74d34883 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -42,17 +42,6 @@ logger = init_logger(__name__) -class VllmKwargsForCausalLM(TypedDict, total=False): - """ - Keyword arguments for Flash Attention with Compile. 
- Attributes: - kv_cache - maxattn_metadata_length - """ - kv_cache: torch.Tensor - attn_metadata: AttentionMetadata - - def vllm_flash_attention_forward(_module, query: torch.Tensor, key: torch.Tensor, From 0ab2f82dc210af5778ada7fd77ff2195bae2dd85 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Tue, 28 Jan 2025 15:35:47 +0100 Subject: [PATCH 52/86] fix weight loading --- vllm/model_executor/models/transformers.py | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index 6f17a74d34883..6c3d81d9ebf9a 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -123,7 +123,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: self.model: PreTrainedModel = AutoModel.from_config( self.config, torch_dtype=vllm_config.model_config.dtype, - trust_remote_code=vllm_config.model_config.trust_remote_code) + trust_remote_code=vllm_config.model_config.trust_remote_code, + ) + prefix = self.model.base_model_prefix # MLP modifications self.tensor_parallelize(self.model) @@ -199,16 +201,6 @@ def replace_vocab_embed_class(self, module: nn.Module): self.log_replacement(vocab_embed_name, old_module, new_module) break - def replace_rms_norm_class(self, module: nn.Module, prefix: str = ""): - for child_name, child_module in module.named_children(): - qual_name = prefix + child_name - if "RMSNorm" in child_module.__class__.__name__: - rms_norm = RMSNorm(self.config.hidden_size, - eps=self.config.rms_norm_eps) - setattr(module, child_name, rms_norm) - self.log_replacement(qual_name, child_module, rms_norm) - self.replace_rms_norm_class(child_module, prefix=f"{qual_name}.") - def _autoset_attn_implementation( self, config, @@ -260,6 +252,8 @@ def load_weights(self, weights: Iterable[Tuple[str, params_dict = dict(self.named_parameters()) loaded_params: Set[str] = set() for name, loaded_weight in weights: + if name not in params_dict: + name = f"{self.model.base_model_prefix}.{name}" param = params_dict[name] weight_loader = getattr(param, "weight_loader", default_weight_loader) From 44f78efaff33b289a871ef2f99249b02eb23f1f6 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Tue, 28 Jan 2025 15:41:03 +0100 Subject: [PATCH 53/86] fix test --- tests/models/test_transformers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index 7351f1fb85fa3..9a98dfa14c683 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -31,7 +31,7 @@ def check_implementation( vllm_outputs = vllm_model.generate_greedy_logprobs( example_prompts, max_tokens, num_logprobs) - with hf_runner(model, trust_remote_code=True) as hf_model: + with hf_runner(model) as hf_model: hf_outputs = hf_model.generate_greedy_logprobs_limit( example_prompts, max_tokens, num_logprobs) From 48478369edde4b082b7780d6f814794a0630388c Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Tue, 28 Jan 2025 15:56:40 +0100 Subject: [PATCH 54/86] update test! 
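The registry-deletion hack is gone: instead of pretending vLLM doesn't support
Llama, the fallback is now exercised through an explicit
`model_impl="transformers"` parametrize case. The same switch can be flipped
from the public API; a minimal sketch, assuming `model_impl` is forwarded to
the engine args like the other `LLM(...)` kwargs:

```python
from vllm import LLM

# Force the Transformers backend even though vLLM ships a native Llama class.
llm = LLM(model="meta-llama/Llama-3.2-1B-Instruct", model_impl="transformers")
llm.apply_model(lambda model: print(model.__class__))  # expect: TransformersModel
```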
--- tests/models/test_transformers.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index 9a98dfa14c683..7868a46100ae5 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -13,9 +13,6 @@ from ..utils import multi_gpu_test from .utils import check_logprobs_close -# Delete Llama from registry so we can pretend vLLM doesn't support it -del ModelRegistry.models["LlamaForCausalLM"] - def check_implementation( hf_runner: Type[HfRunner], @@ -46,9 +43,10 @@ def check_implementation( @pytest.mark.parametrize( "model,model_impl", [ - ("ArthurZ/Ilama-3.2-1B", "auto"), # CUSTOM CODE ("openai-community/gpt2", "transformers"), ("meta-llama/Llama-3.2-1B-Instruct", "auto"), + ("meta-llama/Llama-3.2-1B-Instruct", "transformers"), + ("ArthurZ/Ilama-3.2-1B", "auto"), # CUSTOM CODE ]) # trust_remote_code=True by default def test_models(hf_runner, vllm_runner, example_prompts, model, model_impl) -> None: From 0b348e4df1209f9e950cf7153f264e77caa35c59 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Tue, 28 Jan 2025 16:28:43 +0100 Subject: [PATCH 55/86] Nits --- tests/models/test_transformers.py | 5 +++-- vllm/model_executor/models/transformers.py | 26 +++++++++------------- 2 files changed, 14 insertions(+), 17 deletions(-) diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index 7868a46100ae5..895cfb74d5ba5 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -43,16 +43,17 @@ def check_implementation( @pytest.mark.parametrize( "model,model_impl", [ - ("openai-community/gpt2", "transformers"), - ("meta-llama/Llama-3.2-1B-Instruct", "auto"), ("meta-llama/Llama-3.2-1B-Instruct", "transformers"), + ("openai-community/gpt2", "transformers"),x ("ArthurZ/Ilama-3.2-1B", "auto"), # CUSTOM CODE + ("meta-llama/Llama-3.2-1B-Instruct", "auto"), ]) # trust_remote_code=True by default def test_models(hf_runner, vllm_runner, example_prompts, model, model_impl) -> None: maybe_raises = nullcontext() if model == "openai-community/gpt2" and model_impl == "transformers": + # Model is not backend compatible maybe_raises = pytest.raises( ValueError, match="The Transformers implementation.*not compatible with vLLM") diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index 6c3d81d9ebf9a..a13de051f39a9 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -56,6 +56,7 @@ def vllm_flash_attention_forward(_module, hidden = query.shape[-2] query, key, value = (x.transpose(1, 2) for x in (query, key, value)) query, key, value = (x.reshape(hidden, -1) for x in (query, key, value)) + print("THE LAYER:", layer_idx) return attention_instances[layer_idx].forward( query, key, @@ -185,21 +186,16 @@ def tensor_parallelize(self, module: nn.Module, prefix: str = ""): self.tensor_parallelize(child_module, prefix=f"{qual_name}.") def replace_vocab_embed_class(self, module: nn.Module): - # Sorted by most frequently use (most frequent first) - vocab_embed_names = ("embed_tokens", "word_embeddings", "wte", - "embed_in") - for vocab_embed_name in vocab_embed_names: - if hasattr(module, vocab_embed_name): - old_module = getattr(module, vocab_embed_name) - new_module = VocabParallelEmbedding( - self.vocab_size, - self.config.hidden_size, - org_num_embeddings=self.config.vocab_size, - quant_config=None, - ) - setattr(module, vocab_embed_name, new_module) - 
self.log_replacement(vocab_embed_name, old_module, new_module) - break + # Use native set inpt embeddings + new_module = VocabParallelEmbedding( + self.vocab_size, + self.config.hidden_size, + org_num_embeddings=self.config.vocab_size, + quant_config=None, + ) + self.log_replacement("input embedding", + self.model.get_input_embeddings(), new_module) + self.model.set_input_embeddings(new_module) def _autoset_attn_implementation( self, From 2132dcf87e5717499aa051984669e3b55d4bac13 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Tue, 28 Jan 2025 16:29:17 +0100 Subject: [PATCH 56/86] update --- tests/models/test_transformers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index 895cfb74d5ba5..e79720599edec 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -44,7 +44,7 @@ def check_implementation( "model,model_impl", [ ("meta-llama/Llama-3.2-1B-Instruct", "transformers"), - ("openai-community/gpt2", "transformers"),x + ("openai-community/gpt2", "transformers"), ("ArthurZ/Ilama-3.2-1B", "auto"), # CUSTOM CODE ("meta-llama/Llama-3.2-1B-Instruct", "auto"), ]) # trust_remote_code=True by default From 20bc901679c4595272c818f7007a7a50f769e932 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Tue, 28 Jan 2025 16:30:35 +0100 Subject: [PATCH 57/86] remove print --- vllm/model_executor/models/transformers.py | 1 - 1 file changed, 1 deletion(-) diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index a13de051f39a9..92072ce43ca85 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -56,7 +56,6 @@ def vllm_flash_attention_forward(_module, hidden = query.shape[-2] query, key, value = (x.transpose(1, 2) for x in (query, key, value)) query, key, value = (x.reshape(hidden, -1) for x in (query, key, value)) - print("THE LAYER:", layer_idx) return attention_instances[layer_idx].forward( query, key, From 62540f204489a834979145b9b52b006dfc7e389f Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Tue, 28 Jan 2025 17:04:08 +0100 Subject: [PATCH 58/86] update --- vllm/model_executor/models/transformers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index 92072ce43ca85..b449b64c2e667 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -60,8 +60,8 @@ def vllm_flash_attention_forward(_module, query, key, value, - _kv_cache=kv_caches[layer_idx], - _attn_metadata=attn_metadata), None + kv_cache=kv_caches[layer_idx], + attn_metadata=attn_metadata), None ALL_ATTENTION_FUNCTIONS["vllm"] = vllm_flash_attention_forward From cfeaaae4200290a2634bd8029d4124251adff94a Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Tue, 28 Jan 2025 18:53:38 +0100 Subject: [PATCH 59/86] Fix fallback, dict keys != attrs Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- vllm/model_executor/models/registry.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index f29d055574011..cb61e2e148158 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -383,7 +383,7 @@ def _normalize_archs( normalized_arch = [] for model in architectures: - if not 
hasattr(self.models, model): + if model not in self.models: model = "TransformersModel" normalized_arch.append(model) return normalized_arch From ecf299023f258728a668ed8b701bde09d3968751 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Tue, 28 Jan 2025 19:18:34 +0100 Subject: [PATCH 60/86] cleanup --- vllm/model_executor/models/transformers.py | 27 ++++++++-------------- 1 file changed, 9 insertions(+), 18 deletions(-) diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index b449b64c2e667..238004feee7ea 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -50,7 +50,7 @@ def vllm_flash_attention_forward(_module, query_length: int = None, kv_caches: torch.Tensor = None, attn_metadata: AttentionMetadata = None, - attention_instances=None, + attention_instances: Attention=None, **kwargs): layer_idx = _module.layer_idx hidden = query.shape[-2] @@ -81,7 +81,7 @@ def forward(self, input: torch.Tensor) -> torch.Tensor: return super().forward(input)[0] -def replace_tp_linear_class(orig_module: nn.Linear, style: str): +def replace_tp_linear_class(orig_module: nn.Linear, style: str, quant_config): """ In model configurations, we use a neutral type (string) to specify parallel styles, here we use it to translate nn.Linear into vllm-style tp Linear. @@ -96,9 +96,9 @@ def replace_tp_linear_class(orig_module: nn.Linear, style: str): bias = orig_module.bias is not None if style == "colwise": - return HFColumnParallelLinear(input_size, output_size, bias) + return HFColumnParallelLinear(input_size, output_size, bias, quant_config=quant_config) elif style == "rowwise": - return HFRowParallelLinear(input_size, output_size, bias) + return HFRowParallelLinear(input_size, output_size, bias, quant_config=quant_config) # We don't consider colwise_rep since it's used in lm_head else: raise ValueError(f"Unsupported parallel style value: {style}") @@ -106,6 +106,7 @@ def replace_tp_linear_class(orig_module: nn.Linear, style: str): class TransformersModel(nn.Module, SupportsLoRA): embedding_padding_modules = ["lm_head"] + packed_modules_mapping = {} # TODO Add support for bnb and LORA def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: @@ -116,12 +117,14 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: config = vllm_config.model_config.hf_config cache_config = vllm_config.cache_config quant_config = vllm_config.quant_config + self.quant_config = quant_config self.config = config self.vocab_size = config.vocab_size self.unpadded_vocab_size = config.vocab_size self.model: PreTrainedModel = AutoModel.from_config( self.config, + attn_implementation="vllm", torch_dtype=vllm_config.model_config.dtype, trust_remote_code=vllm_config.model_config.trust_remote_code, ) @@ -141,7 +144,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: num_kv_heads=divide(config.num_key_value_heads, tp_size), cache_config=cache_config, quant_config=quant_config, - prefix=maybe_prefix(prefix, f"{i}.attn")) + prefix=f"model.layers.{i}.self_attn") for i in range(config.num_hidden_layers) ] self.config._attn_implementation_internal = "vllm" @@ -178,7 +181,7 @@ def tensor_parallelize(self, module: nn.Module, prefix: str = ""): for pattern, style in self.config.base_model_tp_plan.items(): if re.match(pattern, qual_name) and isinstance( child_module, nn.Linear): - new_module = replace_tp_linear_class(child_module, style) + new_module = replace_tp_linear_class(child_module, 
style, self.quant_config) setattr(module, child_name, new_module) self.log_replacement(qual_name, child_module, new_module) else: @@ -196,18 +199,6 @@ def replace_vocab_embed_class(self, module: nn.Module): self.model.get_input_embeddings(), new_module) self.model.set_input_embeddings(new_module) - def _autoset_attn_implementation( - self, - config, - use_flash_attention_2: bool = False, - torch_dtype: Optional[torch.dtype] = None, - device_map: Optional[Union[str, Dict[str, int]]] = None, - check_device_map: bool = True, - ): - config._attn_implementation = "vllm" - config._attn_implementation_autoset = True - return config - def forward( self, input_ids: torch.Tensor, From e30000db23b564a5523ee2afa6cc810f61c85e98 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Tue, 28 Jan 2025 19:21:38 +0100 Subject: [PATCH 61/86] pre-commit --- vllm/model_executor/models/transformers.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index 238004feee7ea..700661052baf2 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -50,7 +50,7 @@ def vllm_flash_attention_forward(_module, query_length: int = None, kv_caches: torch.Tensor = None, attn_metadata: AttentionMetadata = None, - attention_instances: Attention=None, + attention_instances: Attention = None, **kwargs): layer_idx = _module.layer_idx hidden = query.shape[-2] @@ -96,9 +96,15 @@ def replace_tp_linear_class(orig_module: nn.Linear, style: str, quant_config): bias = orig_module.bias is not None if style == "colwise": - return HFColumnParallelLinear(input_size, output_size, bias, quant_config=quant_config) + return HFColumnParallelLinear(input_size, + output_size, + bias, + quant_config=quant_config) elif style == "rowwise": - return HFRowParallelLinear(input_size, output_size, bias, quant_config=quant_config) + return HFRowParallelLinear(input_size, + output_size, + bias, + quant_config=quant_config) # We don't consider colwise_rep since it's used in lm_head else: raise ValueError(f"Unsupported parallel style value: {style}") @@ -181,7 +187,8 @@ def tensor_parallelize(self, module: nn.Module, prefix: str = ""): for pattern, style in self.config.base_model_tp_plan.items(): if re.match(pattern, qual_name) and isinstance( child_module, nn.Linear): - new_module = replace_tp_linear_class(child_module, style, self.quant_config) + new_module = replace_tp_linear_class( + child_module, style, self.quant_config) setattr(module, child_name, new_module) self.log_replacement(qual_name, child_module, new_module) else: From 7fd638f298f41044413556884b79bf24ee833641 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Tue, 28 Jan 2025 19:27:36 +0100 Subject: [PATCH 62/86] nit --- vllm/model_executor/models/transformers.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index 700661052baf2..4f6a26b3495bb 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -247,9 +247,10 @@ def load_weights(self, weights: Iterable[Tuple[str, for name, loaded_weight in weights: if name not in params_dict: name = f"{self.model.base_model_prefix}.{name}" - param = params_dict[name] - weight_loader = getattr(param, "weight_loader", - default_weight_loader) - weight_loader(param, loaded_weight) - loaded_params.add(name) + if name in 
params_dict: + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + loaded_params.add(name) return loaded_params From e714c0541f06551e15db7a3ffeb4f36451cd0022 Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Tue, 28 Jan 2025 19:44:21 +0100 Subject: [PATCH 63/86] pre-commit Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- docs/source/models/supported_models.md | 6 +++--- tests/models/test_transformers.py | 2 -- vllm/model_executor/model_loader/utils.py | 3 ++- vllm/model_executor/models/transformers.py | 5 ++--- 4 files changed, 7 insertions(+), 9 deletions(-) diff --git a/docs/source/models/supported_models.md b/docs/source/models/supported_models.md index 528bceba9f2a0..77e1728855b9b 100644 --- a/docs/source/models/supported_models.md +++ b/docs/source/models/supported_models.md @@ -42,9 +42,9 @@ Alternatively, you can [open an issue on GitHub](https://github.com/vllm-project ### Transformers fallback -After the merge of , `vllm` can fallback to models that are avaialble in `transformers`. This does not work for all models for now, but most decoder language models are supported, and vision language model support is planned! +After the merge of , `vllm` can fallback to models that are available in `transformers`. This does not work for all models for now, but most decoder language models are supported, and vision language model support is planned! -To check if the backend is `transformers`, you can simply do this: +To check if the backend is `transformers`, you can simply do this: ```python from vllm import LLM @@ -52,7 +52,7 @@ llm = LLM(model=..., task="generate") # Name or path of your model llm.apply_model(lambda model: print(model.__class__)) ``` -If it is `TransformersModel` then it means it's based on `transformers`! +If it is `TransformersModel` then it means it's based on `transformers`! ### ModelScope diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index e79720599edec..074f44c41ad58 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -7,8 +7,6 @@ import pytest -from vllm.model_executor.models import ModelRegistry - from ..conftest import HfRunner, VllmRunner from ..utils import multi_gpu_test from .utils import check_logprobs_close diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py index de612f143079a..3b9e488d91ec4 100644 --- a/vllm/model_executor/model_loader/utils.py +++ b/vllm/model_executor/model_loader/utils.py @@ -6,6 +6,7 @@ import torch import transformers from torch import nn +from transformers.dynamic_module_utils import get_class_from_dynamic_module from vllm.config import ModelConfig, ModelImpl from vllm.logger import init_logger @@ -56,7 +57,7 @@ def get_model_architecture( continue custom_module = None if hasattr(model_config.hf_config, "auto_map"): - custom_module = transformers.dynamic_module_utils.get_class_from_dynamic_module( + custom_module = get_class_from_dynamic_module( model_config.hf_config.auto_map["AutoModel"], model_config.model) if model_config.model_impl == ModelImpl.TRANSFORMERS: diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index 4f6a26b3495bb..ae4e5f273728c 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -13,7 +13,7 @@ # limitations under the License. 
"""Wrapper around `transformers` models""" import re -from typing import Dict, Iterable, List, Optional, Set, Tuple, TypedDict, Union +from typing import Iterable, List, Optional, Set, Tuple, Union import torch from torch import nn @@ -25,7 +25,6 @@ from vllm.distributed import get_tensor_model_parallel_world_size from vllm.distributed.utils import divide from vllm.logger import init_logger -from vllm.model_executor.layers.layernorm import RMSNorm from vllm.model_executor.layers.linear import (ColumnParallelLinear, RowParallelLinear) from vllm.model_executor.layers.logits_processor import LogitsProcessor @@ -195,7 +194,7 @@ def tensor_parallelize(self, module: nn.Module, prefix: str = ""): self.tensor_parallelize(child_module, prefix=f"{qual_name}.") def replace_vocab_embed_class(self, module: nn.Module): - # Use native set inpt embeddings + # Use native set input embeddings new_module = VocabParallelEmbedding( self.vocab_size, self.config.hidden_size, From fc62d7da6b7088018c0d110728348d3806ae62e5 Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Tue, 28 Jan 2025 19:44:51 +0100 Subject: [PATCH 64/86] Remove unused line Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- vllm/model_executor/models/transformers.py | 1 - 1 file changed, 1 deletion(-) diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index ae4e5f273728c..96b69ed2d1766 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -152,7 +152,6 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: prefix=f"model.layers.{i}.self_attn") for i in range(config.num_hidden_layers) ] - self.config._attn_implementation_internal = "vllm" # Model modifications self.replace_vocab_embed_class(self.model) From 5475b5b039283ec762d72d3c93f8e8a2ca33b34b Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Tue, 28 Jan 2025 20:00:17 +0100 Subject: [PATCH 65/86] Remove `kv_caches` and update scale if it's passed Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- vllm/model_executor/models/transformers.py | 43 ++++++++++++---------- 1 file changed, 24 insertions(+), 19 deletions(-) diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index 96b69ed2d1766..a6868d4418abf 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -41,25 +41,30 @@ logger = init_logger(__name__) -def vllm_flash_attention_forward(_module, - query: torch.Tensor, - key: torch.Tensor, - value: torch.Tensor, - attention_mask: torch.Tensor, - query_length: int = None, - kv_caches: torch.Tensor = None, - attn_metadata: AttentionMetadata = None, - attention_instances: Attention = None, - **kwargs): - layer_idx = _module.layer_idx +def vllm_flash_attention_forward( + # Transformers args + module: torch.nn.Module, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + attention_mask: torch.Tensor, + # Transformers kwargs + scaling: float = None, + # vLLM kwargs + attn_metadata: AttentionMetadata = None, + attention_instances: list[Attention] = None, + **kwargs): + self_attn = attention_instances[module.layer_idx] + if scaling is not None: + self_attn.impl.scale = float(scaling) hidden = query.shape[-2] query, key, value = (x.transpose(1, 2) for x in (query, key, value)) query, key, value = (x.reshape(hidden, -1) for x in 
(query, key, value)) - return attention_instances[layer_idx].forward( + return self_attn.forward( query, key, value, - kv_cache=kv_caches[layer_idx], + kv_cache=None, # argument not used attn_metadata=attn_metadata), None @@ -142,10 +147,11 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: tp_size = get_tensor_model_parallel_world_size() self.attention_instances = [ Attention( - divide(config.num_attention_heads, tp_size), - config.head_dim, - config.head_dim** - -0.5, # ish, the scaling is different for every attn layer + num_heads=divide(config.num_attention_heads, tp_size), + head_size=config.head_dim, + # NOTE: We use Llama scale as default, if it's set by + # Transformers, it's updated in vllm_flash_attention_forward + scale=config.head_dim**-0.5, num_kv_heads=divide(config.num_key_value_heads, tp_size), cache_config=cache_config, quant_config=quant_config, @@ -208,7 +214,7 @@ def forward( self, input_ids: torch.Tensor, positions: torch.Tensor, - kv_caches: List[torch.Tensor], + kv_caches: List[torch.Tensor], # argument not used attn_metadata: AttentionMetadata, intermediate_tensors: Optional[IntermediateTensors] = None, ) -> Union[torch.Tensor, IntermediateTensors]: @@ -216,7 +222,6 @@ def forward( input_ids[None, ...], use_cache=False, position_ids=positions[None, ...], - kv_caches=kv_caches, attn_metadata=attn_metadata, intermediate_tensors=intermediate_tensors, attention_instances=self.attention_instances, From 255ed6c4c1f7c84bc4d51b03027085104080be8a Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Tue, 28 Jan 2025 20:06:58 +0100 Subject: [PATCH 66/86] eager tests do work for now --- tests/models/test_transformers.py | 3 ++- vllm/model_executor/models/transformers.py | 3 +-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index e79720599edec..e88f15d1c571e 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -63,7 +63,8 @@ def test_models(hf_runner, vllm_runner, example_prompts, model, vllm_runner, example_prompts, model, - model_impl=model_impl) + model_impl=model_impl, enforce_eager=True +) @multi_gpu_test(num_gpus=2) diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index 4f6a26b3495bb..ed0674d7594b6 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -150,10 +150,9 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: num_kv_heads=divide(config.num_key_value_heads, tp_size), cache_config=cache_config, quant_config=quant_config, - prefix=f"model.layers.{i}.self_attn") + prefix=f"{i}.attn") for i in range(config.num_hidden_layers) ] - self.config._attn_implementation_internal = "vllm" # Model modifications self.replace_vocab_embed_class(self.model) From 4a855ea61384368418e7f2dcb911eb832b465df0 Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Tue, 28 Jan 2025 20:20:44 +0100 Subject: [PATCH 67/86] Respond to comments Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- vllm/model_executor/model_loader/utils.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py index 3b9e488d91ec4..e9bd72b92f349 100644 --- a/vllm/model_executor/model_loader/utils.py +++ b/vllm/model_executor/model_loader/utils.py @@ -71,10 +71,11 @@ def 
get_model_architecture( if not is_transformers_impl_compatible(arch, custom_module): raise ValueError( "%s has no vLLM implementation and the Transformers " - "implementationis not compatible with vLLM.", arch) - logger.info( - "%s has no vLLM implementation, falling back to " - "Transformers implementation", arch) + "implementation is not compatible with vLLM.", arch) + logger.warning( + "%s has no vLLM implementation, falling back to Transformers " + "implementation. Some features may not be supported and " + "performance may not be optimal.", arch) architectures[i] = "TransformersModel" model_cls, arch = ModelRegistry.resolve_model_cls(architectures) From e416227059e639efacfa3edc53ad8b178fb78cfb Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Wed, 29 Jan 2025 10:11:25 +0100 Subject: [PATCH 68/86] fix failing test on phi: not all remote code have AutoModel --- vllm/model_executor/model_loader/utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py index 3b9e488d91ec4..85304495b71a0 100644 --- a/vllm/model_executor/model_loader/utils.py +++ b/vllm/model_executor/model_loader/utils.py @@ -56,7 +56,8 @@ def get_model_architecture( if arch == "TransformersModel": continue custom_module = None - if hasattr(model_config.hf_config, "auto_map"): + auto_map = getattr(model_config.hf_config, "auto_map", None) + if auto_map is not None and hasattr(auto_map, "AutoModel"): custom_module = get_class_from_dynamic_module( model_config.hf_config.auto_map["AutoModel"], model_config.model) From b74886eb5eafc343255447bd359c2681503f677e Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Wed, 29 Jan 2025 10:58:27 +0100 Subject: [PATCH 69/86] remove enforce eager for CI test --- tests/models/test_transformers.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index 2c2894a54f251..074f44c41ad58 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -61,8 +61,7 @@ def test_models(hf_runner, vllm_runner, example_prompts, model, vllm_runner, example_prompts, model, - model_impl=model_impl, enforce_eager=True -) + model_impl=model_impl) @multi_gpu_test(num_gpus=2) From 5dabda84f9a49e73eeab5ae4e5dafc074b43a3a3 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Wed, 29 Jan 2025 12:09:56 +0100 Subject: [PATCH 70/86] remove BNB and LORA --- docs/source/models/supported_models.md | 64 ++++++++++++++++++++++ vllm/model_executor/models/transformers.py | 37 +++++++------ 2 files changed, 85 insertions(+), 16 deletions(-) diff --git a/docs/source/models/supported_models.md b/docs/source/models/supported_models.md index 284374e1932e6..02af270523c8c 100644 --- a/docs/source/models/supported_models.md +++ b/docs/source/models/supported_models.md @@ -54,6 +54,70 @@ llm.apply_model(lambda model: print(model.__class__)) If it is `TransformersModel` then it means it's based on `transformers`! +#### Supported features: + +##### LORA and quantization + +Both are not supported yet! Make sure to open an issue and we'll work on this together with the `transformers` team! + +Usually `transformers` model load weights via the `load_adapters` API, that depends on PEFT. We need to work a bit to either use this api (for now this would result in some weights not being marked as loaded) or replace modules accordingly. 
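+
+For the module-replacement route, a very rough sketch (hypothetical names: `adapter_targets` and `swap_linear_for_lora` don't exist anywhere yet; the `load_adapter` route is hinted just below):
+
+```python
+# Hypothetical: graft LoRA in place by swapping nn.Linear submodules.
+for name, module in model.named_modules():
+    if isinstance(module, nn.Linear) and name in adapter_targets:
+        swap_linear_for_lora(model, name, module)
+```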
+
+Hints as to how this would look like:
+```python
+class TransformersModel(nn.Module, SupportsLoRA):
+    def __init__(*):
+        ...
+        self.model.load_adapter(vllm_config.load_config.model_loader_extra_config["qlora_adapter_name_or_path"])
+```
+
+
+Blocker is that you need to specify supported lora layers, when we would ideally want to load whatever is inside the checkpoint!
+
+##### Remote code
+
+This fallback also means that any model on the hub that can be used in `transformers` with `trust_remote_code=True` that correctly implements attention can be used in production!
+
+
+```python
+from vllm import LLM
+llm = LLM(model=..., task="generate", trust_remote_code=True) # Name or path of your model
+llm.apply_model(lambda model: print(model.__class__))
+```
+
+A model just needs the following two things:
+
+```python
+from transformers import PreTrainedModel
+from torch import nn
+
+class MyAttention(nn.Module):
+
+    def forward(self, hidden_states, **kwargs): # <- kwargs are required
+
+        ...
+        attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+        attn_output, attn_weights = attention_interface(
+            self,
+            query_states,
+            key_states,
+            value_states,
+            **kwargs,
+        )
+        ...
+
+class MyModel(PreTrainedModel):
+    _supports_attention_backend = True
+```
+
+Here is what happends in the background:
+
+1. The config is loaded
+2. `MyModel` python class is loaded from the `auto_map`, and we check that the model `_supports_attention_backend`.
+3. The `TransformersModel` backend is used. See `/model_executors/models/transformers`, which leverage `self.config._attn_implementation = "vllm"`, thus the need to use `ALL_ATTENTION_FUNCTION`.
+
+That's it!
+
 
 ### ModelScope
 
 To use models from [ModelScope](https://www.modelscope.cn) instead of HuggingFace Hub, set an environment variable:
diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py
index 771fb9cc17f9a..f600978585364 100644
--- a/vllm/model_executor/models/transformers.py
+++ b/vllm/model_executor/models/transformers.py
@@ -85,10 +85,14 @@ def forward(self, input: torch.Tensor) -> torch.Tensor:
         return super().forward(input)[0]
 
 
-def replace_tp_linear_class(orig_module: nn.Linear, style: str, quant_config):
+def replace_tp_linear_class(orig_module: nn.Linear,
+                            style: str,
+                            quant_config=None):
     """
     In model configurations, we use a neutral type (string) to specify parallel
     styles, here we use it to translate nn.Linear into vllm-style tp Linear.
+ + Quant config is not supported yet """ if not isinstance(style, str): @@ -100,25 +104,27 @@ def replace_tp_linear_class(orig_module: nn.Linear, style: str, quant_config): bias = orig_module.bias is not None if style == "colwise": - return HFColumnParallelLinear(input_size, - output_size, - bias, - quant_config=quant_config) + return HFColumnParallelLinear( + input_size, + output_size, + bias, + ) elif style == "rowwise": - return HFRowParallelLinear(input_size, - output_size, - bias, - quant_config=quant_config) + return HFRowParallelLinear( + input_size, + output_size, + bias, + ) # We don't consider colwise_rep since it's used in lm_head else: raise ValueError(f"Unsupported parallel style value: {style}") -class TransformersModel(nn.Module, SupportsLoRA): +class TransformersModel(nn.Module): embedding_padding_modules = ["lm_head"] - packed_modules_mapping = {} + embedding_modules = ["embed_tokens" + ] # TODO transformers will have a util to get it - # TODO Add support for bnb and LORA def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: super().__init__() logger.info("Using Transformers backend.") @@ -154,9 +160,8 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: scale=config.head_dim**-0.5, num_kv_heads=divide(config.num_key_value_heads, tp_size), cache_config=cache_config, - quant_config=quant_config, - prefix=f"{i}.attn") - for i in range(config.num_hidden_layers) + quant_config=None, + prefix=f"{i}.attn") for i in range(config.num_hidden_layers) ] # Model modifications @@ -165,7 +170,7 @@ def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: # ForCausalLM modifications self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size, - quant_config=quant_config, + quant_config=None, prefix=maybe_prefix(prefix, "lm_head")) if config.tie_word_embeddings: self.lm_head.weight = self.model.get_input_embeddings().weight From a1bd892a38d8c7a63b0e4a43038d31d57cb4beb9 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Wed, 29 Jan 2025 17:26:02 +0100 Subject: [PATCH 71/86] remove quantized test --- tests/models/test_transformers.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/tests/models/test_transformers.py b/tests/models/test_transformers.py index 074f44c41ad58..c6536f37cbdc8 100644 --- a/tests/models/test_transformers.py +++ b/tests/models/test_transformers.py @@ -73,13 +73,3 @@ def test_distributed( kwargs = {"model_impl": "transformers", "tensor_parallel_size": 2} check_implementation(hf_runner, vllm_runner, example_prompts, "meta-llama/Llama-3.2-1B-Instruct", **kwargs) - - -def test_quantized( - hf_runner, - vllm_runner, - example_prompts, -): - kwargs = {"model_impl": "transformers"} - check_implementation(hf_runner, vllm_runner, example_prompts, - "unsloth/Llama-3.2-1B-Instruct-bnb-4bit", **kwargs) From 15327e34f52e4f75fd65c0beb97fc49497ace531 Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Wed, 29 Jan 2025 17:30:13 +0100 Subject: [PATCH 72/86] update buildkite to run transformers test --- .buildkite/test-pipeline.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index d5d02fdeb7f4b..780f355e744fe 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -350,6 +350,7 @@ steps: - tests/models commands: - pytest -v -s models/test_registry.py + - pytest -v -s models/test_transformers.py - pytest -v -s models/test_initialization.py - label: Language Models Test (Standard) # 32min From 
3fad390cb2b9b7b9ae416eafa09ae8001090a598 Mon Sep 17 00:00:00 2001 From: Arthur <48595927+ArthurZucker@users.noreply.github.com> Date: Thu, 30 Jan 2025 08:24:20 +0100 Subject: [PATCH 73/86] Update vllm/model_executor/model_loader/utils.py Co-authored-by: Michael Goin --- vllm/model_executor/model_loader/utils.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py index 4d6f0f63e60fc..6f1c16203529a 100644 --- a/vllm/model_executor/model_loader/utils.py +++ b/vllm/model_executor/model_loader/utils.py @@ -64,15 +64,15 @@ def get_model_architecture( if model_config.model_impl == ModelImpl.TRANSFORMERS: if not is_transformers_impl_compatible(arch, custom_module): raise ValueError( - "The Transformers implementation of %s is not compatible " - "with vLLM.", arch) + f"The Transformers implementation of {arch} is not " + "compatible with vLLM.") architectures[i] = "TransformersModel" if (model_config.model_impl == ModelImpl.AUTO and arch not in vllm_supported_archs): if not is_transformers_impl_compatible(arch, custom_module): raise ValueError( - "%s has no vLLM implementation and the Transformers " - "implementation is not compatible with vLLM.", arch) + f"{arch} has no vLLM implementation and the Transformers " + "implementation is not compatible with vLLM.") logger.warning( "%s has no vLLM implementation, falling back to Transformers " "implementation. Some features may not be supported and " From 5663a0c678ec0cec4c95e821042ede6b0da3e5fe Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Thu, 30 Jan 2025 08:55:13 +0100 Subject: [PATCH 74/86] fix pre-commit --- docs/source/models/supported_models.md | 22 ++++++++++------------ vllm/model_executor/models/transformers.py | 1 - 2 files changed, 10 insertions(+), 13 deletions(-) diff --git a/docs/source/models/supported_models.md b/docs/source/models/supported_models.md index 02af270523c8c..4a099646964f2 100644 --- a/docs/source/models/supported_models.md +++ b/docs/source/models/supported_models.md @@ -54,15 +54,16 @@ llm.apply_model(lambda model: print(model.__class__)) If it is `TransformersModel` then it means it's based on `transformers`! -#### Supported features: +#### Supported features ##### LORA and quantization -Both are not supported yet! Make sure to open an issue and we'll work on this together with the `transformers` team! +Both are not supported yet! Make sure to open an issue and we'll work on this together with the `transformers` team! -Usually `transformers` model load weights via the `load_adapters` API, that depends on PEFT. We need to work a bit to either use this api (for now this would result in some weights not being marked as loaded) or replace modules accordingly. +Usually `transformers` model load weights via the `load_adapters` API, that depends on PEFT. We need to work a bit to either use this api (for now this would result in some weights not being marked as loaded) or replace modules accordingly. Hints as to how this would look like: + ```python class TransformersModel(nn.Module, SupportsLoRA): def __init__(*): @@ -70,13 +71,11 @@ class TransformersModel(nn.Module, SupportsLoRA): self.model.load_adapter(vllm_config.load_config.model_loader_extra_config["qlora_adapter_name_or_path"]) ``` - Blocker is that you need to specify supported lora layers, when we would ideally want to load whatever is inside the checkpoint! 
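The new `_FALLBACK_MODEL` entry points at a remote-code checkpoint, so it
carries `trust_remote_code=True`. For a quick smoke test of the entry outside
the registry harness, a sketch mirroring the snippet in supported_models.md
(resolving to `TransformersModel` is the expected outcome, not a guarantee):

```python
from vllm import LLM

# ArthurZ/Ilama-3.2-1B ships its modelling code via auto_map, hence
# trust_remote_code=True.
llm = LLM(model="ArthurZ/Ilama-3.2-1B", trust_remote_code=True)
llm.apply_model(lambda model: print(model.__class__))
```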
##### Remote code -This fallback also means that any model on the hub that can be used in `transformers` with `trust_remote_code=True` that correctly implements attention can be used in production! - +This fallback also means that any model on the hub that can be used in `transformers` with `trust_remote_code=True` that correctly implements attention can be used in production! ```python from vllm import LLM @@ -109,14 +108,13 @@ class MyModel(PreTrainedModel): _supports_attention_backend = True ``` -Here is what happends in the background: - -1. The config is loaded -2. `MyModel` python class is loaded from the `auto_map`, and we check that the model `_supports_attention_backend`. -3. The `TransformersModel` backend is used. See `/model_executors/models/transformers`, which leverage `self.config._attn_implementation = "vllm"`, thus the need to use `ALL_ATTENTION_FUNCTION`. +Here is what happens in the background: -That's it! +1. The config is loaded +2. `MyModel` python class is loaded from the `auto_map`, and we check that the model `_supports_attention_backend`. +3. The `TransformersModel` backend is used. See `/model_executors/models/transformers`, which leverage `self.config._attn_implementation = "vllm"`, thus the need to use `ALL_ATTENTION_FUNCTION`. +That's it! ### ModelScope diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py index f600978585364..bd3896c13ec5f 100644 --- a/vllm/model_executor/models/transformers.py +++ b/vllm/model_executor/models/transformers.py @@ -35,7 +35,6 @@ from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.sequence import IntermediateTensors -from .interfaces import SupportsLoRA from .utils import maybe_prefix logger = init_logger(__name__) From 5679d4d3b5f29c919a37db2748b21c76012cb03a Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Thu, 30 Jan 2025 09:09:56 +0100 Subject: [PATCH 75/86] update --- vllm/model_executor/model_loader/utils.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py index 6f1c16203529a..a5ec91d2a2453 100644 --- a/vllm/model_executor/model_loader/utils.py +++ b/vllm/model_executor/model_loader/utils.py @@ -1,7 +1,7 @@ """Utilities for selecting and loading models.""" import contextlib from dataclasses import dataclass, field -from typing import Dict, List, Tuple, Type +from typing import Dict, List, Optional, Tuple, Type import torch import transformers @@ -27,13 +27,14 @@ def set_default_torch_dtype(dtype: torch.dtype): torch.set_default_dtype(old_dtype) -def is_transformers_impl_compatible(arch: str, module=None) -> bool: - if module is None: - module: transformers.PreTrainedModel = getattr(transformers, arch) - if hasattr(module, "supports_backend"): - return module.is_backend_compatible() +def is_transformers_impl_compatible( + arch: str, + module: Optional[transformers.PreTrainedModel] = None) -> bool: + mod = module if module is not None else getattr(transformers, arch) + if hasattr(mod, "supports_backend"): + return mod.is_backend_compatible() else: - return module._supports_flex_attn + return mod._supports_flex_attn def get_model_architecture( From 03f18447bb0d32efde7287155aa85d5fd74a33f5 Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Thu, 30 Jan 2025 14:39:25 +0100 Subject: [PATCH 76/86] Fix failing registry test Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- 
tests/models/registry.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/models/registry.py b/tests/models/registry.py index 7952e65aa76a5..1cad1b584ea86 100644 --- a/tests/models/registry.py +++ b/tests/models/registry.py @@ -279,12 +279,17 @@ def check_available_online( speculative_model="ibm-fms/llama-160m-accelerator"), # noqa: E501 } +_FALLBACK_MODEL = { + "TransformersModel": _HfExamplesInfo("ArthurZ/Ilama-3.2-1B", trust_remote_code=True), # noqa: E501 +} + _EXAMPLE_MODELS = { **_TEXT_GENERATION_EXAMPLE_MODELS, **_EMBEDDING_EXAMPLE_MODELS, **_CROSS_ENCODER_EXAMPLE_MODELS, **_MULTIMODAL_EXAMPLE_MODELS, **_SPECULATIVE_DECODING_EXAMPLE_MODELS, + **_FALLBACK_MODEL, } From d001748896c6a03ddd52f5339217ff2a2682d28c Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Thu, 30 Jan 2025 17:42:05 +0100 Subject: [PATCH 77/86] temp: run transformers tests first Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- .buildkite/test-pipeline.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index 780f355e744fe..681745428d600 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -349,8 +349,8 @@ steps: - vllm/ - tests/models commands: - - pytest -v -s models/test_registry.py - pytest -v -s models/test_transformers.py + - pytest -v -s models/test_registry.py - pytest -v -s models/test_initialization.py - label: Language Models Test (Standard) # 32min From 4741ab2e4debc66c264001f41523541bcfd7ea24 Mon Sep 17 00:00:00 2001 From: Harry Mellor <19981378+hmellor@users.noreply.github.com> Date: Thu, 30 Jan 2025 19:14:07 +0100 Subject: [PATCH 78/86] Update transformers pin in `requirements-test.txt` Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- requirements-test.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements-test.txt b/requirements-test.txt index df7e904bb0d34..f7bb09965ce3d 100644 --- a/requirements-test.txt +++ b/requirements-test.txt @@ -617,7 +617,7 @@ tqdm==4.66.6 # transformers tqdm-multiprocess==0.0.11 # via lm-eval -transformers==4.47.0 +transformers==4.48.1 # via # genai-perf # lm-eval From 9a29e4604541e8ed6ccdb55b2e3c4565461f359d Mon Sep 17 00:00:00 2001 From: Arthur Zucker Date: Fri, 31 Jan 2025 10:09:44 +0100 Subject: [PATCH 79/86] update deps --- requirements-common.txt | 2 +- requirements-test.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements-common.txt b/requirements-common.txt index 4ecc513a0853a..cda38acd798b8 100644 --- a/requirements-common.txt +++ b/requirements-common.txt @@ -5,7 +5,7 @@ requests >= 2.26.0 tqdm blake3 py-cpuinfo -transformers >= 4.48.0 # Required for Transformers model. +transformers >= 4.48.2 # Required for Transformers model. tokenizers >= 0.19.1 # Required for Llama 3. protobuf # Required by LlamaTokenizer. 
From 03f18447bb0d32efde7287155aa85d5fd74a33f5 Mon Sep 17 00:00:00 2001
From: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Date: Thu, 30 Jan 2025 14:39:25 +0100
Subject: [PATCH 76/86] Fix failing registry test

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
---
 tests/models/registry.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/tests/models/registry.py b/tests/models/registry.py
index 7952e65aa76a5..1cad1b584ea86 100644
--- a/tests/models/registry.py
+++ b/tests/models/registry.py
@@ -279,12 +279,17 @@ def check_available_online(
                          speculative_model="ibm-fms/llama-160m-accelerator"),  # noqa: E501
 }
 
+_FALLBACK_MODEL = {
+    "TransformersModel": _HfExamplesInfo("ArthurZ/Ilama-3.2-1B", trust_remote_code=True),  # noqa: E501
+}
+
 _EXAMPLE_MODELS = {
     **_TEXT_GENERATION_EXAMPLE_MODELS,
     **_EMBEDDING_EXAMPLE_MODELS,
     **_CROSS_ENCODER_EXAMPLE_MODELS,
     **_MULTIMODAL_EXAMPLE_MODELS,
     **_SPECULATIVE_DECODING_EXAMPLE_MODELS,
+    **_FALLBACK_MODEL,
 }

From d001748896c6a03ddd52f5339217ff2a2682d28c Mon Sep 17 00:00:00 2001
From: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Date: Thu, 30 Jan 2025 17:42:05 +0100
Subject: [PATCH 77/86] temp: run transformers tests first

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
---
 .buildkite/test-pipeline.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml
index 780f355e744fe..681745428d600 100644
--- a/.buildkite/test-pipeline.yaml
+++ b/.buildkite/test-pipeline.yaml
@@ -349,8 +349,8 @@ steps:
   - vllm/
   - tests/models
   commands:
-    - pytest -v -s models/test_registry.py
     - pytest -v -s models/test_transformers.py
+    - pytest -v -s models/test_registry.py
     - pytest -v -s models/test_initialization.py
 
 - label: Language Models Test (Standard) # 32min

From 4741ab2e4debc66c264001f41523541bcfd7ea24 Mon Sep 17 00:00:00 2001
From: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Date: Thu, 30 Jan 2025 19:14:07 +0100
Subject: [PATCH 78/86] Update transformers pin in `requirements-test.txt`

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
---
 requirements-test.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements-test.txt b/requirements-test.txt
index df7e904bb0d34..f7bb09965ce3d 100644
--- a/requirements-test.txt
+++ b/requirements-test.txt
@@ -617,7 +617,7 @@ tqdm==4.66.6
     #   transformers
 tqdm-multiprocess==0.0.11
     # via lm-eval
-transformers==4.47.0
+transformers==4.48.1
     # via
     #   genai-perf
     #   lm-eval

From 9a29e4604541e8ed6ccdb55b2e3c4565461f359d Mon Sep 17 00:00:00 2001
From: Arthur Zucker
Date: Fri, 31 Jan 2025 10:09:44 +0100
Subject: [PATCH 79/86] update deps

---
 requirements-common.txt | 2 +-
 requirements-test.txt   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/requirements-common.txt b/requirements-common.txt
index 4ecc513a0853a..cda38acd798b8 100644
--- a/requirements-common.txt
+++ b/requirements-common.txt
@@ -5,7 +5,7 @@ requests >= 2.26.0
 tqdm
 blake3
 py-cpuinfo
-transformers >= 4.48.0 # Required for Transformers model.
+transformers >= 4.48.2 # Required for Transformers model.
 tokenizers >= 0.19.1  # Required for Llama 3.
 protobuf # Required by LlamaTokenizer.
 fastapi >= 0.107.0, < 0.113.0; python_version < '3.9'

diff --git a/requirements-test.txt b/requirements-test.txt
index df7e904bb0d34..9a1e566c5b3a1 100644
--- a/requirements-test.txt
+++ b/requirements-test.txt
@@ -617,7 +617,7 @@ tqdm==4.66.6
     #   transformers
 tqdm-multiprocess==0.0.11
     # via lm-eval
-transformers==4.47.0
+transformers==4.48.2
     # via
     #   genai-perf
     #   lm-eval

From 5f6668f7b891ac6dee7b2bc074319b06565c1c59 Mon Sep 17 00:00:00 2001
From: Isotr0py <2037008807@qq.com>
Date: Sat, 1 Feb 2025 14:07:53 +0800
Subject: [PATCH 80/86] make v1 work

Signed-off-by: Isotr0py <2037008807@qq.com>
---
 vllm/model_executor/models/transformers.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/vllm/model_executor/models/transformers.py b/vllm/model_executor/models/transformers.py
index bd3896c13ec5f..ff1ae0ac85bac 100644
--- a/vllm/model_executor/models/transformers.py
+++ b/vllm/model_executor/models/transformers.py
@@ -221,6 +221,7 @@ def forward(
         kv_caches: List[torch.Tensor],  # argument not used
         attn_metadata: AttentionMetadata,
         intermediate_tensors: Optional[IntermediateTensors] = None,
+        inputs_embeds: Optional[torch.Tensor] = None,
     ) -> Union[torch.Tensor, IntermediateTensors]:
         model_output = self.model(
             input_ids[None, ...],
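PATCH 80 only widens the `forward` signature: the v1 engine may call the model with an `inputs_embeds` keyword, so the wrapper must accept it even where it currently ignores it. A self-contained sketch of the pattern (a hypothetical toy module, not the vLLM class):

```python
# Why the extra keyword matters: the caller may pass precomputed embeddings,
# and the signature has to tolerate that even if only `input_ids` is used.
from typing import Optional

import torch


class TinyModel(torch.nn.Module):
    def __init__(self, vocab: int = 128, hidden: int = 16):
        super().__init__()
        self.embed = torch.nn.Embedding(vocab, hidden)

    def forward(
        self,
        input_ids: torch.Tensor,
        inputs_embeds: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        # Prefer precomputed embeddings when the caller provides them.
        if inputs_embeds is not None:
            return inputs_embeds
        # `input_ids[None, ...]` adds the batch dimension the HF model
        # expects, mirroring the call in the hunk above.
        return self.embed(input_ids[None, ...])


model = TinyModel()
tokens = torch.tensor([1, 2, 3])
print(model(tokens).shape)  # torch.Size([1, 3, 16])
```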
From 95c191641530a21883ddfb19a8df7ab0714c87e7 Mon Sep 17 00:00:00 2001
From: Isotr0py <2037008807@qq.com>
Date: Sat, 1 Feb 2025 17:56:32 +0800
Subject: [PATCH 81/86] fix custom model test

Signed-off-by: Isotr0py <2037008807@qq.com>
---
 vllm/model_executor/model_loader/utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py
index a5ec91d2a2453..78adcc8dda593 100644
--- a/vllm/model_executor/model_loader/utils.py
+++ b/vllm/model_executor/model_loader/utils.py
@@ -58,7 +58,7 @@ def get_model_architecture(
             continue
         custom_module = None
         auto_map = getattr(model_config.hf_config, "auto_map", None)
-        if auto_map is not None and hasattr(auto_map, "AutoModel"):
+        if auto_map is not None and "AutoModel" in auto_map:
            custom_module = get_class_from_dynamic_module(
                 model_config.hf_config.auto_map["AutoModel"],
                 model_config.model)
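The one-line fix in PATCH 81 corrects a membership check: `auto_map` from an HF config is a plain `dict`, so attribute lookup can never see its keys. A quick demonstration:

```python
# `hasattr` tests for attributes, not dict keys; `in` tests key membership.
auto_map = {"AutoModel": "modeling_custom.MyModel"}

print(hasattr(auto_map, "AutoModel"))  # False: dicts have no such attribute
print("AutoModel" in auto_map)         # True: this is the intended check
```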
From 29066263e9eb4fa758fd0fb765866613395dc398 Mon Sep 17 00:00:00 2001
From: Isotr0py <2037008807@qq.com>
Date: Sat, 1 Feb 2025 23:00:11 +0800
Subject: [PATCH 82/86] fix incorrect backend fallback

Signed-off-by: Isotr0py <2037008807@qq.com>
---
 vllm/model_executor/model_loader/utils.py | 46 ++++++++++++++---------
 1 file changed, 28 insertions(+), 18 deletions(-)

diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py
index 78adcc8dda593..234bc4d9a6914 100644
--- a/vllm/model_executor/model_loader/utils.py
+++ b/vllm/model_executor/model_loader/utils.py
@@ -37,22 +37,8 @@ def is_transformers_impl_compatible(
         return mod._supports_flex_attn
 
 
-def get_model_architecture(
-        model_config: ModelConfig) -> Tuple[Type[nn.Module], str]:
-    architectures = getattr(model_config.hf_config, "architectures", [])
-
-    # Special handling for quantized Mixtral.
-    # FIXME(woosuk): This is a temporary hack.
-    mixtral_supported = [
-        "fp8", "compressed-tensors", "gptq_marlin", "awq_marlin"
-    ]
-
-    if (model_config.quantization is not None
-            and model_config.quantization not in mixtral_supported
-            and "MixtralForCausalLM" in architectures):
-        architectures = ["QuantMixtralForCausalLM"]
-
-    vllm_supported_archs = ModelRegistry.get_supported_archs()
+def resolve_transformers_fallback(model_config: ModelConfig,
+                                  architectures: list[str]):
     for i, arch in enumerate(architectures):
         if arch == "TransformersModel":
             continue
@@ -68,8 +54,7 @@ def get_model_architecture(
                     f"The Transformers implementation of {arch} is not "
                     "compatible with vLLM.")
             architectures[i] = "TransformersModel"
-        if (model_config.model_impl == ModelImpl.AUTO
-                and arch not in vllm_supported_archs):
+        if model_config.model_impl == ModelImpl.AUTO:
             if not is_transformers_impl_compatible(arch, custom_module):
                 raise ValueError(
                     f"{arch} has no vLLM implementation and the Transformers "
@@ -79,6 +64,31 @@ def get_model_architecture(
                     "implementation. Some features may not be supported and "
                     "performance may not be optimal.", arch)
             architectures[i] = "TransformersModel"
+    return architectures
+
+
+def get_model_architecture(
+        model_config: ModelConfig) -> Tuple[Type[nn.Module], str]:
+    architectures = getattr(model_config.hf_config, "architectures", [])
+
+    # Special handling for quantized Mixtral.
+    # FIXME(woosuk): This is a temporary hack.
+    mixtral_supported = [
+        "fp8", "compressed-tensors", "gptq_marlin", "awq_marlin"
+    ]
+
+    if (model_config.quantization is not None
+            and model_config.quantization not in mixtral_supported
+            and "MixtralForCausalLM" in architectures):
+        architectures = ["QuantMixtralForCausalLM"]
+
+    vllm_supported_archs = ModelRegistry.get_supported_archs()
+    is_vllm_supported = any(arch in vllm_supported_archs
+                            for arch in architectures)
+    if (not is_vllm_supported
+            or model_config.model_impl == ModelImpl.TRANSFORMERS):
+        architectures = resolve_transformers_fallback(model_config,
+                                                      architectures)
 
     model_cls, arch = ModelRegistry.resolve_model_cls(architectures)
     if model_config.task == "embed":
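Distilling PATCH 82's new control flow into plain data (a sketch, not the vLLM types): the Transformers fallback is resolved only when no listed architecture has a native vLLM implementation, or when the user explicitly requests `model_impl="transformers"`.

```python
# Fallback decision from PATCH 82, restated with plain strings and sets.
def needs_transformers_fallback(architectures: list[str],
                                vllm_supported_archs: set[str],
                                model_impl: str) -> bool:
    is_vllm_supported = any(arch in vllm_supported_archs
                            for arch in architectures)
    return not is_vllm_supported or model_impl == "transformers"


supported = {"LlamaForCausalLM", "MixtralForCausalLM"}
print(needs_transformers_fallback(["LlamaForCausalLM"], supported,
                                  "auto"))          # False: native impl wins
print(needs_transformers_fallback(["MyRemoteModel"], supported,
                                  "auto"))          # True: no native impl
print(needs_transformers_fallback(["LlamaForCausalLM"], supported,
                                  "transformers"))  # True: forced fallback
```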
From 8c33bd679654df663fa612210fb1f7daae97af81 Mon Sep 17 00:00:00 2001
From: Isotr0py <2037008807@qq.com>
Date: Sat, 1 Feb 2025 23:43:15 +0800
Subject: [PATCH 83/86] fix oot registration test

Signed-off-by: Isotr0py <2037008807@qq.com>
---
 tests/models/test_oot_registration.py     | 4 +++-
 vllm/model_executor/model_loader/utils.py | 9 ++++++---
 2 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/tests/models/test_oot_registration.py b/tests/models/test_oot_registration.py
index 2c413a633896a..0ca7ff0a36b86 100644
--- a/tests/models/test_oot_registration.py
+++ b/tests/models/test_oot_registration.py
@@ -13,7 +13,9 @@ def test_plugin(dummy_opt_path):
     os.environ["VLLM_PLUGINS"] = ""
     with pytest.raises(Exception) as excinfo:
         LLM(model=dummy_opt_path, load_format="dummy")
-    assert "are not supported for now" in str(excinfo.value)
+    error_msg = "has no vLLM implementation and " \
+                "the Transformers implementation is not compatible with vLLM."
+    assert (error_msg in str(excinfo.value))
 
 
 @fork_new_process_for_each_test

diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py
index 234bc4d9a6914..0b8dfe3297132 100644
--- a/vllm/model_executor/model_loader/utils.py
+++ b/vllm/model_executor/model_loader/utils.py
@@ -30,11 +30,12 @@ def set_default_torch_dtype(dtype: torch.dtype):
 def is_transformers_impl_compatible(
         arch: str,
         module: Optional[transformers.PreTrainedModel] = None) -> bool:
-    mod = module if module is not None else getattr(transformers, arch)
-    if hasattr(mod, "supports_backend"):
+    mod = module if module is not None else getattr(transformers, arch, None)
+    if mod is not None and hasattr(mod, "supports_backend"):
         return mod.is_backend_compatible()
-    else:
+    elif mod is not None:
         return mod._supports_flex_attn
+    return False
 
 
 def resolve_transformers_fallback(model_config: ModelConfig,
@@ -48,6 +49,8 @@ def resolve_transformers_fallback(model_config: ModelConfig,
             custom_module = get_class_from_dynamic_module(
                 model_config.hf_config.auto_map["AutoModel"],
                 model_config.model)
+        # TODO(Isotr0py): Further clean up these raises.
+        # perhaps handled them in _ModelRegistry._raise_for_unsupported?
         if model_config.model_impl == ModelImpl.TRANSFORMERS:
             if not is_transformers_impl_compatible(arch, custom_module):
                 raise ValueError(

From ccbff79904f174f7b65ac54e56f1a590ec85557d Mon Sep 17 00:00:00 2001
From: Isotr0py <2037008807@qq.com>
Date: Sun, 2 Feb 2025 00:16:22 +0800
Subject: [PATCH 84/86] add transformers tp test

Signed-off-by: Isotr0py <2037008807@qq.com>
---
 .buildkite/test-pipeline.yaml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml
index 681745428d600..e2bf876322f41 100644
--- a/.buildkite/test-pipeline.yaml
+++ b/.buildkite/test-pipeline.yaml
@@ -486,6 +486,7 @@ steps:
   - VLLM_TEST_SAME_HOST=1 torchrun --nproc-per-node=4 distributed/test_same_node.py | grep 'Same node test passed'
   - TARGET_TEST_SUITE=L4 pytest basic_correctness/ -v -s -m 'distributed(num_gpus=2)'
   # Avoid importing model tests that cause CUDA reinitialization error
+  - pytest models/test_transformers.py -v -s -m 'distributed(num_gpus=2)'
   - pytest models/encoder_decoder/language/test_bart.py -v -s -m 'distributed(num_gpus=2)'
   - pytest models/encoder_decoder/vision_language/test_broadcast.py -v -s -m 'distributed(num_gpus=2)'
   - pytest models/decoder_only/vision_language/test_models.py -v -s -m 'distributed(num_gpus=2)'

From 3647766731c09a85c21ae7e9b7572587ee0af1d3 Mon Sep 17 00:00:00 2001
From: Isotr0py <2037008807@qq.com>
Date: Sun, 2 Feb 2025 10:50:25 +0800
Subject: [PATCH 85/86] Update vllm/model_executor/model_loader/utils.py

Co-authored-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
---
 vllm/model_executor/model_loader/utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py
index 0b8dfe3297132..55eca1b77df11 100644
--- a/vllm/model_executor/model_loader/utils.py
+++ b/vllm/model_executor/model_loader/utils.py
@@ -30,7 +30,7 @@ def set_default_torch_dtype(dtype: torch.dtype):
 def is_transformers_impl_compatible(
         arch: str,
         module: Optional[transformers.PreTrainedModel] = None) -> bool:
-    mod = module if module is not None else getattr(transformers, arch, None)
+    mod = module or getattr(transformers, arch, None)
     if mod is not None and hasattr(mod, "supports_backend"):
         return mod.is_backend_compatible()
     elif mod is not None:
         return mod._supports_flex_attn

From f68af015f5df4a08ad682df9cebabbd508889497 Mon Sep 17 00:00:00 2001
From: Isotr0py <2037008807@qq.com>
Date: Sun, 2 Feb 2025 10:53:06 +0800
Subject: [PATCH 86/86] clean up

Signed-off-by: Isotr0py <2037008807@qq.com>
---
 vllm/model_executor/model_loader/utils.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py
index 55eca1b77df11..97a45fdca8b4e 100644
--- a/vllm/model_executor/model_loader/utils.py
+++ b/vllm/model_executor/model_loader/utils.py
@@ -31,11 +31,12 @@ def is_transformers_impl_compatible(
         arch: str,
         module: Optional[transformers.PreTrainedModel] = None) -> bool:
     mod = module or getattr(transformers, arch, None)
-    if mod is not None and hasattr(mod, "supports_backend"):
+    if mod is None:
+        return False
+    if hasattr(mod, "supports_backend"):
         return mod.is_backend_compatible()
-    elif mod is not None:
+    else:
         return mod._supports_flex_attn
-    return False
 
 
 def resolve_transformers_fallback(model_config: ModelConfig,
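For reference, the net effect of PATCHES 83 through 86 on the helper, restated as a standalone sketch (assumes transformers >= 4.48): `module or getattr(..., None)` is safe here because a supplied module is a class object and therefore always truthy, and the early `return False` covers architectures that `transformers` does not export.

```python
# End state of the compatibility check after the cleanup patches.
from typing import Optional

import transformers


def is_transformers_impl_compatible(
        arch: str,
        module: Optional[transformers.PreTrainedModel] = None) -> bool:
    mod = module or getattr(transformers, arch, None)
    if mod is None:
        return False  # unknown architecture and no custom module supplied
    if hasattr(mod, "supports_backend"):
        # Classes that opt in explicitly get the final say.
        return mod.is_backend_compatible()
    return mod._supports_flex_attn


print(is_transformers_impl_compatible("NotARealArch"))  # False
```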

zEqmXnvS?nAtzDg^231NK0x>HrHGk9+qL>UrTTNs%DfR~?Y;qMxySfvH?hQ$p7?rQ= z?=K}tks1P5e@jV8J^XtaM9MEy6~2GDNB)P#{n!5Yl`n>3p*<6k+BUssvBPQ2H_LhJ zf+P9g+7945S^k$XMd0Z<=BpXy&I^&zzAjN@f4nqL%>QZ^X8cu8F=-$+QJ=_DRs4C!sX9j+A-R$B8fWSQnIEi zU6l;Le@zo@1Nb)d%Q@AbM5cEfaq0uPEy!3PV=Mezzk6OvrI9h*>z5|e^M(FgCh3YU znG=)HkszINMF}pX_w2p(<(n@>`ZC&?@1#bM9%Ni!j2GDI<@ORN*ti4W5lhnV0jXe& zWXf-5`_-Ns#$*_;wDy~YuKQe~eDdE4@OU$LCYgvMh_>Q)I!4GXq`>JPixW0eSV|7O z0Ezh{A8@K?h>J<@c2|nMu&5g_%Y+Z5xt&TrSnIQeX**&ucp#-#Q9!EZk|N3MTl(hW zVRsqlWlxA=QK#;{&x^~wdiMN+f`jVjYdk7Ny6+$VuMt#_h45c0e3z;A+>PQX%OukM zbLOjxbSP3H?DeIU;hAatCXAci^)}KwiSL=cuTp*@Fh}Hw;K4t!U}abz%IH z&mKb}w+{2S!NV&_3+$+pNMSGS6K2h>)3?HPOpK<)LxtCwm0ynoJlQNwzw_gA(z{*M zeAD0{Qt`^wxfZ$B0&`G6&HHt1ARR19CXjBeld8H^>w%>ae;!7`G008tk(F%`DXWnQ z6>a7h5G!aRAT#z7TK{b}Yw{)OczL>(kwg}`e<&NB$u0})Gt<_J!dzc#E6sM0H%yO( zwyqf^HWph(r<7mQ&EElb)lO%XrLsDBrtWJ&4po_b* z<;nS++m49AnSKUe*PJPWvo6az2DsHgLBzYOUGV%*6huueYsR`>p{GY-V)n(OSD)@+ z{m+MY>crj5nk8Z^HFCkoxXQW?RVnHpx|E&}^*PZ?@jG|#KYRDj58Gozs8O0Hf4}Sd zk+8;n-saf7Rfu&v{9_N=>7h=J`8@{bX`cF{Hoke+OdaoY<2jcFFLE*O1{`wSn1#}w z%6Bi7NVBzq_lB#9A9pOp;vDoLBM3*-fxV8`W!VY4Dk&35U!y24Zp85cpJ zcwvLK4uy%a*xg$n@OqJhX6CQW-c#es$2{!S&IJjXFiN9(1lx+_G;=otcP0MO>nxV9?l?2l3v#l^qcWd&y{* z7WYR^`*#nT86RqqQqmI84=T!s^xK{sWz~pGBeK7BcC9gPM#&m(Q}EWVv)nPB=qwaZ zz%DMViwP*Ya8rp{9&q+$Er+Su==-WJk#SizwThg%LGdG$!clN(sJCMGfM2DFWP!t>H4sLb-&WaE?CLG+$~z+Rz39`;kQe4Bjl)A7o1vppZ( zQTjNojPN)XbaZ>pR#tHH_uL?}&@i&K{C*gJKb9AXGomS62urB)cokf?95~~+wfV#1 z=JM!d_yax@ZD!tcvU~qlI2%Z;(+{maHyffN)u*e@)h-KmE@tizbZ0h@XWv9#|DzoL z7UnR^JC2YutiPZ2V0{sF&(9~%cd73kX@5Efx%5#_@DjDco9xltB5919u{BCV$CF>#CXF%~G=31D)D~xFXwQJ8J0yRa zna^2tc>Icak+3*T4>z+4wQ0oFhA|fX)LnyGnkc7guoi zNwkm{qS-uVs_=`dQ%t_@nU(z6~L7tsuK zgZ!pShZ7-U<-KNAnDDLGdK}xaWrn)&svh-S(9JoHhbzkx|JsGg^R<&t7 ziNy`TBjd`_yb>^q`k#jNuL=>uM#tHVyR6+v2yHV@-;W>lY4EmhIpgLO`QbuiXYn6R z>waBOOCriMREBb7&-ziIQ$$h+@3VL5>o}g<9etGzbkzg*bolM6*it>io@Xq3u(eSo z@2n-+plQR!GV0SB)qDI^wX@j~M`rreh%_@_23%!@Aj+bZZaUI@6jBRn+$S!At{e7~ zW8T8lZ0SJDEG&1VNhix>m8sFJ5UX6B<;G0zSD?9F%NU!YtJetM_(gHe%*H7;ZlmxY z8VE_nx&pO{XNT2l_~nZ+0bgjuBZ`gFA={|fSTub2)Pfbys)WQI9zM1F&QYL2jDxKWVFycelOES@X_M@rY?C46<)u-LgZas|APCYCQ>r(f z6Ihi01PT)68WfmHB)OxxP}398Q5b5JC2TG7c4ksz1I=lQ&^1TEuvu_LrcxQY!qg3n z#yrO0uGZ!(P?kL#!3L%;UaWrq4@Kog8L*()5z&He87+at)X@YA8z~<;-Qpr-MSWiWiHW~PWf5T z?s+6L`-CR@&=_`{c9y!XCpgY?+aLu{|5$(Y{^08u+3709I2ZNH&$_k=7@nuVH5|Wm zH-btizvG=#9GCc9%S}y9dHjCkoA=M!`qZM%ZhJ6WaH4!L3T3x^U)qj+aeAUMCBMM)ZLaVFl8-(3meEVzLm&BC;=1_;%}GLqgOnj8FwsB&17DCFNe4a#Fgv zBLij7pb1|%z57MYyC_|6FSTlUyw(S+u2QvCVFR=!`uKK$ zmw#vfJWBt2okFOlR(D^%)zlGM6eX;&1gfNWea_9|{w*Jihb@_h#iTUfzw+jSYn`mM|ArM$DjCl2}o4qEW$-bzdE>ep!%D-|L|`9lsx z+^mH>qeA`IJ%xVo5JYvYf)~xQ7KH+?ua)KOPMcOwD&6kyICG`(I}@L`W-q=*c&U*y zI<)d7A>J3~l`YSa6}2TnB!a__LCo&MZ}zckPvg+V4izq+1EuhtCB@Er6uHkRtNGBxm4}|4APiNaMRJ|48RI+Ek()u-_&f-??r1Ja2GX|VN&I@;6UaQwR6u2f{A9^WOpZLpkxK5E?Rt%2z|@jDj>NiNxk0{T5Wz7c_asb#aU(Ln#g?>j6YF4=pT4|3DL4-b@s^od(R zp=V*v&AQsnP~uh#_0Yb2g{dPF4$aj*hnUXO;G~s&?Wsqg+%p7eZTQ2k-E}M&l%Vg_ z>@Fv+dg$p>m!pC9)Fbcb9ObdGP@aC!Cs-w0!OzvuAZJ#-BA%>(HkJnCRt8)p7=#m7 z74I@j#Pof;2~Y2SelUP;a&DB(t*OzyV<6j;13EL5OETb{u`VHrMd28b#3zyqwThUm zqoAQxdgbEUpACteoZqN@N;LHDb%M~5uq+fU3{hT#>cn6%a_ z^zm(-?%#Zr^4eu&g`s{1yv!p)kKG^!-TndMH)^rmgfQEh>{rrFpA=6&Sc=8F`NVa6aEn-Hcnk(YhNovWR>X~=*?rNq`okxq)REqS6dg^&4VbDrsmaS!V$|)c_zMWDR-r(_E`x zy4t7Urm|2}ow!w2;VUIOuEkWT-b7tsXt7sz5#q91DLSC2Az}mx@Ue!0HE%U0^FSKA z)2wi(@*CyN{6}gvccQW5crUB-bM5LMNSW6BhG-cTcK!@ea!VJdAW!nsE<@UR%cM%I zE3dJuzoJ<@wYdv=s|ic{vFl4&tO9lJSE^R5-;;9c(|&|VcBzOI@tV^rh&DNE#j-j@ zNFTp)3DnYU!d(JONYvn1lRPUghhz2mscBx%q(mglc$JC}^H@8yRz&C0LH-m(6N@Nq 
zD(4kG_;s`J7!R^>>pOWx3*6~ojE7|7T_WhG0 zU9DUu-K4$zR#^H#AodPPF+6L*dp(X^%qpYGh7k<}!4`b~ko1!5-=^q07lPd1kXmXB ziZot$nezM|%Ifs%f3~j@f>$qq^go`5;5)v1AiYL!YMp}$D&6a3XK#jAr;Cq$Mptf@ z>$l5Jdz);Uubt<-&gOdi`VcT1nQK|n=Rl|zp@GASau!tc4wFVvR<|hIu<_kI)*Ay- zC^9_A;eD~Aq8Mo;$^wRt9nKeo>GiZJkU&x25+FDBtW@`6ipgV&%ww_(f5leHS2%wO zecf#GQ_+sN)v~54@s44sovuouocxj?v(j6oX^gtCcMWltF6MU@nBfTb(;*@m*ohj>!RjiOgO*fHU(d8O9Dhev%>kSb<+xl zoW~%#x3||3aBeZuW^Q`5{u|A3|1|rZzH!7&?XhxWxtnifqnA@(MDy@Q^p008eR1f2 z)X^O*UGc?aQtw=dkm$Ad#dp}-<2YEel~adE{tFZOpJJ1TWwI7>PAlL1qe;>(=&O3< z2>$*x`Nau5o6QNoYRBK{*G^j-%kCn*1J$#I52-z++1XOJ^EqL9Z)B-3)+2tBX!l_( z_0thQFbNbmv?Dare+Hx$g5n^>sMtX0-F&tcKytIhcE@D*8SXWBdb&p2T$zcDaY7jS z=TM8k2lNFmlx&5ns?vet(0JfUkv%3ieyP|v19C?$D8%7)A#12+%r6=9JYi$~aw}g- zoB;8?L<(|VbF+Rub?x3$qopi8#to1?P{Dx#jW}0HF~l+7U+y!W#Y3v$+RcNI-{jkr zn|@YM;yx@zNGmx8YYc>(s^dNM+=dXNy6*J`$vU*M%`|I|VcDO$$xB3gc%Hnu%i=IS ztg$i840qxz=rl&e-j~(p;idQskg_=_*32>0RSB0}3Q$XlF~u;sw= z!-TF^_s&S<%P)<-^OW-{3Vn#q;Ni$`E|4|J6jaL$sFa)YN+RIv5HVrp_)DbmBVrOL zqgJV^zoiBo3Ln;dOD5C!WEIN3P`P`xd{S+`c^T|9Lv1|<1i9>q>%KMVw>@iZvy_Wt z88K}B6E~_lwtC>IRm#c4l!v*6J@^N}x_ke$sAW2Z!f}7J>$o?N&t!Oh zX28zr{~-K7ZCL9~wh7QZjj_XvMv^C&2NZl3^Eqd;Z6guN?q?0_Dmie`^^I=7oGimDN~h zJKyi)7I8L^t7O6zgF!eD3jEVayP|O0t@gU?L*@-QExdz~;%||eu|CV35CaWBwx-D# z)pb7jA}DIxo5!#t3Rg3MUgK$`|GMD9IHp~gfB+fY6;NrZk#13GlTsjVd|8pN0j=!E z(g)w{7CU{%?zPk?kSpLexm%K~2v?e3cBb?g)G_s_Bnd%3AHmYX!gR>jkfU+pjG)zN-9C7B)zC~bo$0!Ru2>sPK_zC z)vv!Nrpk&?Xm?kK3}BfXHYkY6SjW_{{3Ib;QkFG}lyv`6$B~L%P>7c^$6Rm9yCAMr zb()_0B<4Zy>^$o$lh>lS9k3Vu(>7~-*7m-IM2fnYwvoN0l$mM!#LI8-@Atf?OHWgZ z^W)!Cu8QbmI3Cs)4^&r=_+zAEEpU1I^{|s1h@B< z>-94At?d%Wga2ht|2Co0lKaq@;t4M~>M;t61uG}q>*cZIt(~s3iPIXke|z#T$K@&G zJM^&qDVgV|8~5Ye%DOEo$djal;m3#A6*Zl*^hW@_;_GW4QDRac;Uqg-Za#{eXL${cy_ZX=oV;uFR&!L+Abt_X(^| z7MB?E#bPdT;5{wCSZ&4DtQTR$Kr&GJt#r7|V8*QXBMH_i>Edvm=4tF67JeQU2D!Xc zI!x>c1=(LZ1oLYIOA{(mAgWPvP-duAAcsJh7cmyK#qD;1^kDTeGf|K&6vz~s*B?m6 zlCQZYE?@Nq*JY~IIi3WG$dBczT-`Ok%Z)fNn;5ijpw;$}u1|#w1Av<(9n~@ey>%wV zDF<1h8Aen4kiBZw?e2KNR9SS2x@Uq%6|c0)e7fFPQ7vl+pxQcVFwSG3k|~%aONs8X zpB@Q$`DxOCq1_xP2HT}BJuzUT-ypMDI{PMtErExDVJ4v3vha6NS#BJ;A6_vI{5IDc zjwv01kRvV{TCfK#b)jySel8V=w-&7;Z_7M5=Y^&Rs$ZoRnNpE3yd>gj(Qendd_UK9 zi0#GZX=d7Zu~l$#<9FsXkybo$vaXexv_6}k$v~Mmn^;+Q-nNkIdHGTO{SI2w25&u_ z%i$l${4b?WprIODBIe_mHa~NyU%##E)pF)I|7T3d7@y@o4}W*CQrKva6SZzQkZj~XYra_5#IND?NAnKn8{+-WLS*^`OM>xL;c6Cg02>epbIq@%iO0@m znKWABL@=ATPk&@9*3Ao4@_!nVwuhz6D|=-OU(+lCNIwh~ryxzQh_w|xGjQTJ;$|@U z+z`PDvYg$T7KCTB6eJtJXTVJ@D%8#NRVI@?qyu592eG4BN>{%IIhM?AE#$C<&Jd9S zfu<@jjvhZKi8M+C3=)64{G)c=Qp2Myz)3DT_rAi>MkV~xqx?WH zo_!HW?kanFky*`%u`Na4=$Hc_LSxqZ)U_m%flW;@Rve@eWHwpYspOA4W@Ry%fz+#f zPP+^tE_Bjot@rkBXTbPzJ|nO_Wdib_BqSJ{?J|+~grbR~z7x#r@0E3TOBVsnld@GM zG?cQ4#U{t2P*^R`^EM}zr_ccAFv&JZ_@nn-3~n4OyZi(%Y1WO^(-hLYR7Cc5XGMXp5WEG^Fpq*L;v)xqyyH#!mZHD&b_VUH*p0}dI_hJcCi~Wa5 zHt%bD$_!USf7jKLY?R-LpOY$JG+|{aQc2v!_*pr97hFGyD)7!Q4vyQLzucQ(>PiLf zlyq#qx<`AA*`F;QK=5t$za1Js!RRi`-ENfm{By)@bYkIFvyFu7+y-o2H}NWR3cCdr z$KOy=a;~IXd#Y50{U4~6h99^agVc|~n-JU?QFR%if6jCX1}J$zkR!zu*{DtK3q=I&URVwRJUom%)vd>SY^Rghe1HNhIo%oRSgX@ z$;ru&Kdxe(Q(Tji3dly>JUaW+S;so90?P{e6ipx>MV>u+kut5Np4A=rh=-ROBrvYt z8u+PdoRoR|g(bYC?ET|beyjN=@cbVQ`p>`AMwIgEI^DtdX;zLfeH0-PSSl&aI!>WZ zI1l`gPb3++UTk7%h2Cx6x<0|d_DAA?YrC*~Kau-;WBc6&YnYd$YvnROdjfj#q-LmS zCC=+=haFdU%9eWWmhXDri%P$_oZR@kwx~-vJcq~2z#;KVKyl(1?5){tZSErT$^AL+ zSz}SR#@+2Go}Jn`s#_zsv!y5uZ^;PCrg~v0xx)%&UXm)UI7UW8L&TLH^axWtJ+090*4uRbFwWr%N45cGOhmkBp4qfCE;-ae65TGjli0vOBUtofMh* zxp4Z9Q;21<`rQ(4Zo6+=2INE()IXGa+9AZMKRR8TRh~x?r@Y9tV-hHCOA5Hl$4#`g zw|$e50ZBUflNr99?*DXDY{B)Ni;mBiLz4BaL`3ljAZD1A@gdhB`L`v`BgMrnx8{*; 
z?EMNsH~Fnc+Fa*a1LV$TuECtR|MuYDj#F3BQmxaV)hDLPxl>x0aB)#qL3$`dBPv@x$&_rDqf!zUx!DNr|kI2R!M#^%EXFzPcL~EQF-ZD68qdcBDAt#g$EWXeIG2EShP{NA96-o$s+nKxP~~Ob z&``2XfCB~jPrv~N(~4|NIFHpK4qc+6Vx)@~Og4Ks8Q3X~X|h+@sfi`m>A2Yic(0w) zso9&+VuYzIQhxk*d|@m`lUWks%P;C2B5aZWq-w7D!!`vj&!CX37wsi?3e$Kxu%qk^ zp-O+Rd{2$CP&uvpe$i=>1BP;$as;)O1E3*Z2b4=8uq!_MGx#o`-$rlzleRqc54uJK z4yj(#FQT#Xn(=YS=JfEg{?L>Zw`OkPL$0JGw+&?yvA|p!F|7K^0`!D4@Z6-=^khv( z<;o5r**?`#fG2yGc|+%D_fDtzJ>V19b;xY@991crHD{ zLlt1*C%EFMIA@u+S4-QH-NuVM?@4m)b| zA6Qa;6EN1b{)(Xek8Sa%cW)lE%+^V$H+cJRJMxHHg;?KCmqd?9nwFLN>z{T6d=SIP zMk4i793R5|Hnc|{7JFvboo-QYjI0>M#Jqie4ms}prft{AMkwatKpn1Jm z8ib98E1}0#!c72&^EFkpH|U}wW=&Q%)j&(bT%%7I_j8eXw~IbKF5Xlw?gT>S8Wj4q z2b%w;*0@-E!B4q8^Gk&=ny4jzENkd(8%L+A2$`UWeq%QOfZ4B>4+Tmm^Ouv@qTY0fnM2>Kr_z|c-l+eg56X14$1ZsFRU zxCHKpx%qxPy$Fqx@5=t#kdih1?-fmHxG|9X`4xF~eQfUqgrXqNDE2AGp0}WQfCUez zCo1>#h(8>y1uUXpH`vXOu!i$|Ca;*S?o7!O7)L*|a2$ki1+Kce@zsM1J&cxP^x zCus6#iIU3LVFHTx@#@&e-}w&~!#{N^|H38js)o7^b|Es zQMQrT?V@6SIH1F*eE7f+`(qwghV;7a+j7G^OX9ip;FWyfR=+qrSk~-NxS$@!?hCn@k0~!|*A5DN6(;kTFOw_e$WhwYI zF=F=QoS5<;PwL1MH<;(kej;^wsrIio6McekjFy;B4q9g{v3vlrd*NZ9CNxbw^zS8~ zLSLG`VJK3y1#5)x19x~di>_FtaBXd;ILV;F|3-BD73okhK6BKMGbT(8=2H%$NxEISd3f^4Sm|KnlcAVcy<=mm5}E; z?B9RNvpoUJlmApGI)VA(Bq|}xZ0FH zT9;zQ_Ljlv^IGy@IJ-NU&X@lKQ5%(B?dZi%C7-1If0TW7T+`q8|63FT5e!NYP!Z`; z=~UkqjdXW+H#mI@B4rTLAd;gy2TCajBcx-5^hPrpzIP}(R6aj{|M(cV-PgS*p68tB zId_N!D;B_>PG-NJX)VfYDSzQlgOO#w;^2}Y+skG0ZRCA_i4M>=E6TFWDI6j3?_XI$ zUck0Vj0iJmvQq8RVJiz#^4-$?Gl>s%UfTh>$X@AE*>t2|)bCcQc)BZItE)_6b~2ha z(8RRSfs%7cdTThAspP}?uEQ8wppxqN1@uG;%H#c7UQ69{@42SttaoXGO2PYE(U_BE zCE?I*ZMYFDkmRrBb6mSm)ss+`f*RTWXE;2SZ;G*ftIvUb>guthpX^v1S zuUYmNWQ}y3qw^QI%T4Jb5y*4hdq3R@LM#(r@3(jU198CZ2pVnhC`v28$?nH}26G-?TrNfP_SePm z>Eg0bRdY6D;jqops*LHHTNfyQRh%HHl3bY@lfze&o<~06Z=%~~a2iAvrmND$mX?R^ z8CmI!MQi#g{;t2wv#ZHYDH%>%p{d4c^0(~DW_ZyG{$dpUsfXTYX)JdPW&x>-^kNLS z@o9BP=+^@A9EDdM4_`rkGeydi0%04E$I?7BKq#}w={y%dE?KG>=6=*}Q^(-9C;x+^ zeSCAhoPWt|pf4nB++SE-k$SFNLGuASGrn{p=I!MVD(fX+`ICqJB`M=*ye@Rjw1`bR zOpTthWTdnIFVRqZNY8~t0$!g(cJ@3YEpHC@J>eD++Hbk}*f`rD`&w-$byOX_q1 zZxUqhQbN;Uhh3?U)BS=S6Y@AvvCb*TsO{2}S7DFfihdhgYt3s|H`I)W8R&G^Y=8V? 
z7yZxstfpLjeGCjhq>e;uEWY*F*p5nf5uXv(pbHdGsWYISbRqvOOPqLs$yMJmemnHPBQe&*wdbe@@ZAx7wDk`Rl80x5rrc)CX|VA zp9>`*^i)H?EHS7h3?h%{8&dl%0yVee3NdP=`i^V#&u|5jyF8Pmuq`8?%JWndCfK3Wt3i`puO-9806 zr1I2k-&@j-@{S>!CugrOHJ|WQs3-~|7{7Mt7fnMlm|qmymN&V~wDuPm3IFQ~x*GO9 z6C*)Xh)gDGNcnHt7U3qD$a}8sE$EWUNRg^)NlLu!4WIW{9TIHNeZKMWSGWP9 z;Zq%4B(5U3a6{cwcuY(I|KyRw;oF1RrO2%uW74VNPvdrIXC-RP0b?VMmz8NpYbb4m z16pjzg5P8E^Sv8c@JALlXA6F5DmF(P!DI4W`gg{2sltDjp{7@{LU?$bywy><^auqb z9x*8~6O^d-58Ht;LL^hj?y@7HCGKj+z+)%2arBo**Wo>iz#Y}hq1|yOuSTcLN(ol< zKS}S)zKem9xwOM4UfAyx{zJEa#geC$Vr!NYDV%I3^997E$d%Ze8Sy0p-zI)l73w zq<>gTm*i#LxF5x{U_>ckFJGSi24(4 zM2w4&uwbX`z==rA9Bo$}&UUQkteHkEfPh3tJkn&22}}EXGB0J;CpL>WoC?*beo=qv ziN5)veix&fHkMvVU$J(Ecv!~u+Q`_?LWfIwt-ILv?-OHU&2b`K@7Yz2>kr?X%^B2h zWBpR|8a_F9S^S$_Ye^je@W9EvK77b#*GqdNb{F+QD-H_fB-nlv**7ZB19%v0_r;&-NLd!t2&1OEF3MZk}4 zj9K^IXU%>t7X@?ag=OnAx|S_|pHrMJ?<%j^7zcRs@^DFKp5BINLhaS*R}AAN4zJAp zdusUvv)lqOmel>}w%t-zmA%MVkYq3Q$^B{caW{LM;)Lm?^;$>F2s>jkec$MZY#EJm zA=iNQ>ML&c6vLvw#wHN^NJ%ic9msPgEq!+qKGa{PHuXzG`HK;Xy@Wx$*ku zn$lgg`=O;51iO0`H7Q)O0vfl;FJ%v`CJuhaUbHzKmrLHF1=J_%7eB*Ty9b=s(&~a< zz8FY{H;n!2O&}bW(_(~DZZFgEE#;2QJtFCeK06vpnpf;*dmjmsNz+>{l7{9_?0@+* z`f=rTXSPZ1*)5K=;HxXtna8A7WtD8h{vi3KqlX@Amk+Vp-o>1T^u*?>KLw{{o2z!z z>e~l~)A!mc&UL6={UwyRE#r`Gu86X9!*cpht^`W8S<}E0sb?KODOTLfz;s4`y4vn5 zBn^a9e~tV#-`jrZIsD#x+};hlnzf4-F-2WbEmetmbEMpL#x|<(jKVMZ#OgyFMc6!3 zdGX{{f5%7Ub&*^zs#N7Ff|v4my1zUH^JDoPt!}jZ`Zlx^nwYI{qGf))m!raTahm2x zl}khEV_VE)%f(}XU9R2n#h8MN<)Sy^iu}w~Epu9gz&N5@+-u~U)$QTNhNRY=2N{@3 zRjeRg&O|K-#=jJwkrDk+xc7>20gsoaU#W zU|wla zWqsjNc?!b9_8bYTtJc~DVtL}}=Ct#CPR-jnjJms*Gk!h>Y!zzi{#UK zOvSSum>&H`ONr2f#_9&f_aPm)TtSu(?_biVBQy`4kfQE~da}giHbjE^AX9T`VXX8W zEz|S63B?bfaDf7gw8gma(tN=v<=Ha64Qpi)zdxYk#pY~~69)Ld`SyPz-Z;5Ci-PG4 z4VJbVcW-%u>Pm}JaMMa+QW#dX{AN!{-wv{L6cSUuZ z8A-=2@!v1F9C7gxNjh3JM#|Fa_52p)OYnDSH}EHD-(5I|5h?I(HGvfqce%0~Y4M)A zU$RT)U&Q>!8Tw$K)I_96c-9c4MzW(J4uL$^^xqhq5|ksAwPj6+0fUIf7Blo*hg$;DAZ_rGXH_V2}trIfg>a zY**||*{^Fgyj34lTAh9bk+q)~JkQ5$>Xn9begE~1?=b+5%a^XxH7^yF^mfXeg&&g^ z+szcouDE%Z;p{#KVsUj&atNoqR6lN@X9)t@K@Pp;?ypBswlQ&A54cM+%}Dq5gIQ9A zG>bZDE!ioI#6mp{_l|F{rbhF%`P418_CmVImaF~i3;yfvrG+VJFg5Q}(J4=k&1T*{ zO8pP#Z&(6cMC0_TTTRyF`E&LNYu7lPEhST+1tOF-i4NiO$NJ${( zG5{DWiW^{!%+&8yEcV}Pv-S%7=Po-sZn>k;AZ7zfA2~pB$R8t!ysJ7kW82%!fc0cS zjlmJ~hGpPW?nU-KnCXx)XKz~l$>O$|*?Fo9za(TBJ%CFvQ3@P>T1tv#{Yb#T0N>vw zIsqybRB_RJD%(}esG?UbR2xMvByU}(Gaaj(>=CwoZrbqSryen{U?z|l*qUm2$xwP0 zD?h7w9u@y&M%LK1-8Fw`{JYX-@$6LC&Bm(eQU`X7YqA#Uq7yKvJkT!){f6MphjG(X zx;z&V6LWg;vN`tchYYQ z8hzJM8;)L$bKfWbs-oflVjHo@VD4_zAL^({HA`+e(XDA(=)V<`suMN0RH%aM=f8j{ z^!YB@J}3AX$p|H>-7mLq?$G6k!z5~BKgrZt(zF{5Kx^uTpzcE`#bvUB*_m^SyU}Js z9|xoz(?0F+$*SRm-GNOzuFn+p?!wKNBsBOH+Z*PFOGiQ}uQ9N0| zs$emx5*23q%0N%j-1eR-v~aR0si_yW7Swg6@dA&;u~R)#ZP%}wYQ-GdSe4tK!}5vk z#q83MLqmQslupG>^}M{1DW89%8Zn-A3CEQPpC7cNF$A5>(#fk$9l;yweYfd(zT0vC z?ZN!Vh4zhQ5qy1SDsHJw7i?d#M&-CZORFNizONJkjY|xXg*rJ)U>Z0cz%L&vQE}f| zCR@AF@a&)T1>fcRMl*MA%@VO1@veOvu;(o@yl;i~9dCRppTAOzS{WYIp6y7*QL_JM zpL5wsKxlgvrvNF{mkDf#UJw?;8sqR%lFD8@f+D)qBRa!#+A&>7IAO<0f{I;3c0;N; z=+H-ZnsE1ci%Y^u0!poZuy$^&6(fZxoLE&{-@0caT z;Vsom+2MVjk+tsvCQisjV(Ag$&N>H@irw&$l|!FazPaM(%}LKelUDpkz!Di;#5q#s zB=vn4RY$lKEIM*f&>IyKy2>9h{OpU)5%a~MIw9u(5Y~F!gjg9B&b8X!0Q8u<>83F% zqrHlrmjKH^T;&Io>6#uI_cvvATzDxUz9Y08eE>$su7K8Q!)(^aWAC#DR$znG-@pKQ z*{ZbyPFSjQ6XZC8)(eQBK1qHGC#<}F;uKE%Db5TVOOLg?PjB6%jM(DTzw)lobqSE} z%K85G=1mWt1vmz~5&~}ym3 zNHRlmoFe#I8T5LV@Sis8&TO^up>%n!K1tQV8qixy&?lKD_KHWU(DRMM3*SU%hkMAB zcC90kFfUCnIK*5t{X^q5Sj%SPU1_=o_`Crv6YAaOjZXL6pPG~bSV`pgGxR~u|Nax%q5y2(N&T5;6dSl36(ysmjnG}(xK((#r|mO zPRpRH?*io|E6&tEpq$)}A}1y8EzRZ?#5(Do9xD|uYiVZYWU6uog2Kqf$ujTN=5!t5 
z#6Ut|KJ(@h_e{&jAZ^W0aYe_!f7p&1a;K&t@9h=EV*Tm7M6+^mL)FZF(c7@`OH{6f z$)Xn9Xhj?-MK=1Qr2-^cpMAYV9&+Q~?gzJTBqENZLOF^z{>hg8VesH`{YN&oW)OyU zL3TXTa)l3c4UB->=7Kd+D5FCvG9iGQ~5ihfG@&52X6%_hy5Xmy{+!z2CnqQ%yiILHiLWLrj8ym%Uq~r7JC+kk@%4h8WBEw zdin=vL$WfpC4;fC4Gho916rJmI|uYf)z|Cuwax8}5bsNDc6>3j|5f4eD?j9my}Odf zB1!W2G-Msb7HqEH+#lpez%lI`9#?!)5%s!m!80KmB{cRQSJJ>9uLd`yCHR7Y4hf2M z4u zruU}ANAp12$L}TLw*_sVyK%|g?`lGzyu(7XB+ntS{j|gJizeHt%6qc_yLi)Fj1xtI zpUalkP1+S+7sg9BHGWK^r8r4@Z;*Xr8Atw9dp#Q>d3iJBt^QQDoFRV|bP^u(WUp3zO%ur%Ai~+^esdFmvDi6Gv>IC-875|WC<@g7k zQ;?&bS*==Kfw@U=M~*F1k(Qdq&g#W24gR8!VC&Um>~ahkkd(Ma66nJ@3QM?OR~ z?X83b^WbbQK_VcAsV^*W=6oOn+1`4aOhtzuq)kO|1mwB_(Y6I3!&fEY)BWtTUUO3p ze_Cdiir^cF19qfnJ3WHaack#;(9p>w;bhPS>y6xzoAE7i?A9fZd=s}+bYt09OyO?K zHhHI|Uu}|qh&;znrmFlw><7h!%>^9AQTtEmVDji2|BeR;O8~AQ0>AF_T4F)x%*2aN z9X!{^uc!-c-EtP(U#PoL`|QL$5v81{~MYhIiHXQjZ1~ zlh?d5XUdOzD@`F}{6JYrIj>jw82WrHU>Dv=JgN6Gq1>tlw>kCA{XzbB1GL#OQKt@H zxa^*b4gH=L%5fHbNsLiVr+TC{l1~v!uv9d-2bV(nteS)Q+&`Z%zqK_6nwa7Ped^JL zgP@o(1AAKjRQHlRhAVMJ$M*LI1V$S$5SehR#y)oh^hl{b=pkK3`nvw$vSLe5D(U+^ ze%`2}TA%??3iK`~fiUGF>Qq2XNfLCm?y3_KFio6^w3o#We0K5>kN~T1ud{toDT3t- zQgeL0sMrKc&yLR!CuH>$g^4^tH@tZ#lhREd9n>x#2#4+2CpJQyp5WT8FwmcD<X)_L2W82gblXew2HP>z5W)Tbt*1EowIqYM|vraEW)&!F?ug1>Se za}!3ciT~Q-_?E~)%4sm9@btiDl*Ryi=4gF|AW|iYN2g<;`vmmI1NwIv&Yd*(JJgn{ z8IKokjiS^LV<&18{5y2%`rF4A_j;ZRG1QT7CRH8=fSjnH>E;vkw86t4yiLjTNBQjq zj429RUwVYOJT>~I+j43ek~H7Vimw|)sCwv)R@nD`@U(M!hx{5hGkq5%OXAJtw^{yZ ztrxRaFrx|upjz}?pP)+bExvh7PqICe04D0La~8dwq4Xn&OeA6o|NP>d`_uI&^HOYl zLA*-g=t`b8&`8|6cD2ws6}s&hzg;+Ao+W91Gn?OV>n_bJ@28wGKeExvele<+5onC8 zv-=%yG9M22rKVzd0WUn&7c&;m3YWSMZYEIkQ%;1qqBhD@5sn`Qm=tKJzJxwj_zTk!U_CKp#A%eG?@h z=NY!#O91u%FQ4r7cURTQJYJSFB(dc_e|rXTc*qa5IaL_4AEZkzbZ zW0kAV^q{Qa34mH-qKWh53Dv`{*M8;`$B~C2WjV|1O?@%Rsc|YG8k8)5q_F!qcvz+X z1fb=uo)lf#OBkEGaS$H!_f!;e_>c{Kb`zX@#$P#5O`f)G8#$gv2FsMm+7FZgVRkvQ zjsng3=3724VtI~!MLsnzL!5ugAvLdHQkS0S3uuJdlYJBdqtPm-Z1VM2XNut|QsTEF zs-dEtCO2E^X4Zyc)Ea0Hs+o#H`3$J(-i;SMH~5aeE)gOU zy6MqP?lPI?Zuj_2#N+`6vc7LH6znn3XNAsjg78{(RY1F1X1$^+Kkt!qFz=MP>ewgC^B%pz;NCAqdKgy|etxo>|D zMFiuVQmgYGp%a@vhWWfZ;m+KXfD&?ag?YM#pVTFMK>qb{w>LgtrTn7a575Li3e4Ec zX)yG}KXX0l2M)us90*Ea>pq6*YCkJvBio!WY>oFf{*qCoy~LZay!fM@^`B z-8r~DwA6$F%x~PtlJYvbe11?Uu_npOo#pDcBoDBNX*pvQxU)v8C<0V6ldLgHa;0V4 zV;y-Pjcz}ev{~MWr^&TaM#|Rt{8e98p-u103XIhh@FKP|wh^@}c?l&-%06NBfIU9L zaNBs~!VjqVMG+HWI8o&InCjA*e-SiCiTv+VbzH~gCEl~@N0qEN)Mm=>_(}UI9C(m% z=XTN_OzQT^MRFPzq@5L0tx645j;Q~%RX6$ACs$pc=sTYkLz;#5XKj`*l%>s2=#eRu zwcmCDV*wQohwBTP4mXTXGzHF_qF8Qa7@ypbgN$38(3o$htzt7qT6^s*RE6E&Hp>(eb?0x*x3387hBW4>>^xQ$JzV(S*!M!G; zDg=Bqwy85h=nyVKi{N_L{O3E`RZISp@r7{rf~vKG(AJV`D-sT-x$SJM300l!yDUxr^AJ0;PL5!2fP3@I$HK`&wPA4>Pe;kTg~t#!R2-lwc;JVDr2=PE9I*b9nWA+*gk%qRzvUm_u^jmKbzD9c zGKb+dLS?(*7?Cuhh&_`&wsC?TDSbU4`#eI5jXdz<4I0)f=fd?}!y-%GsE`+L(1$Q4yw>}XvhsB$}MNtg`|QkD8mjH>7a-C=mp=PFH3tsmp<+v0>#c!maqKT)Dzsgm&% zkvyrXn0)qglLBb63G{htp{dBNaOlPB1&CxWsZdRg*tVE*y+Fx|uOB*lA}!nDP3!Q4 z(oK>7BqXilAaV&cNvWD;?Y%|G5^UIFAm7&r! 
zVBgg{8#m*;X!WsEq#tgyTUkB=-{A1$`~7=E=1<=;aU8W zr?)!kOhXvf46aDhaW!-gd>KsEX{VdN&*(IS8{5u!cEH|hmbX}$yh7CikSu`ELzT;iua3Z=LCO&a7Lmfr3)aY%lbA=hgvmS3LuFBpQ#u<^?$ z$raT!_vhwydoF37#bQw>GM7tj*=&px@W zE;5#apd_cUxMIr_J#%DWig$;4mBRR?Lhy41xm%ZS-KvLibJS}2>0iayMcm1m#n;um z+R(K^dzNrS$%lo5nH+!mis2>2i^rP_UPhSsO9!M)1u1_s?DvZCtMfE zne2PqQIx}d>5`1uo_Mb|XI@rarmRpdONj3$k-G?0jF&_<8$?W$H#LRPAa39s5v5~y z9f*8$-BNOMrL%ZvW4WSYXS7pOr~EF81~-bN)8c}?V&6tfJcoWqUX8tfjeN7qq%ZgI z#3e&khe%w?j^wr*(LEbYXE!ugnktKozoKy*~BA~Ds6{1>5o7gJCS$nn>@$@dN{5_|UKyXR^#JFD`W?7E_x?53jP;T*gk zZ+LlSUm`ccUh6KTrL+`(a-%QxV z&cdj3U?HN!wT1yq4peW;8cO&c%G5ZY>9*$ZIhvc_B(qFZPSbc)yfR^3v^}??L!(#$ z*;in=WKn24U^{(ZOBs(jxi;8Cg19IVzQ*AZxrJa{}?;sKsHh zk}rGJk>;Gcqj(dl{x0jwaL=1hF}2g0{?Af!lXfyME4G-ZrHFQxX^z*_O7PbVpd1_J zA{bK|*YzFx*r%GD21n>fD87p7UMQT@9En|CVX!W{IimTjO{2`pTJ<9=`mq5w%Fftl zYj$fQyJ%wBbdf64y()hgCTK~Y?YS=Az0p!Zl7cLcwRO?_l9Fwb_*|=h%3i1H>a+pl z@x;o|yT3W>u3gICj*Zn)n!QKhe?nFmUXO&K(bh8xbc&}KO0WFzZ)s7;q^-wuYQvqN z6s0^X&iJKSU=Lv@xL7hC%kcHM@6c_N7^k`Q%=Vzp8k#$+`KZf28}o9U`DmCd9wQN9 z9}q!$#_jy-_F?N$RoY`M&!z0TygdElxrBR5IcIfKUl&N;g(yjRF`3#SZkfN>fjRg%&KG^arDq3pP zY9ky`Be@cKAx+Kct;4BagIjNX7gU>tDLZ*qIyK))F8738?0Wmb*4gszz5Rf?r-I}q z99%F(;5_==Ne>x|OoVq0zv)VO*P``28TQI(b@swEKfVZ%pudtAZ-axOW8vA&OxmVe9g5;uMxmq6h~V1f3m)6m8s|()>kg>oM+)sOYOm&sjnyQpq zkFiN6%zb^^6DsUM`H4gRrr4#7B`LlP>tK`oBc%(kI(&YOOW(w}Kuk|h7P3vUKy2Ah zHrcr(Tna}znB7QLW!(qDU+bo_H813l;T>nfNEFQSNcid+S?>Oor-L+xRaS zjoLG13ai!K=C804d(+OUIF>8E=2mn@FbqNOgS5QWqi=BFbVsb<{8eELrcGUk0^^w*h#(H_gk zqqMZ%V7Z~R;%&d;C(F&YwkAqEgmy+|zGRGG$*{M`(YPuX+EJ7o^1)@2zD;v%M`c(h z5t^pP#HHc>q()-W{?Dd#F-r~?56_FGJs6PeKR^~D%JAYuxcfDp#HMg`#iQ=NH-ec) zU1kHDnlzLW6}>9GEH;7Q#`}fwau@kBbo<9Qg5XbK2sTZ6=pc)m0%*}Xi!$wu-ESf) z6Kx-Dl?hF`*7;V2d@)<Ro5kz`)CQNr?oXr6*@qr8h%2Am40Emnp?dw$*rfo$Z>K74qY*pduD-jGB+AH?kdT z{PbVf7(B46Z0&9so-sq3+{1{qhP3^n|GUn!=`lW9{Wx%Ty{9`^j#NBq9X?r-E}2fu2M*9@LuGvG!PctD+sR#lISKvJs|PHzFU1C0eMm%K_+0y#fU{kh@Z<pEnc;LQNQZIsaC_Z$*l6t1zff#;@<@!1P#K zY(69W?_>jEWU=b;C9RH~zny#3l-r1saK_$N@-^`oJVsbpGu@qP%hWI8+NIudMd*yR zGdy>(Tut~dZT7U%Ey#wjhXAV+DjeBfVOl5_nLldHdyk2pOrcEYjaJv$flHxh{!qO9 zLHe;GkWaaZwa%Fu#5Az;l$7Kdly#0M3KT^~ux-#o+$xuXo@*Hs-4&IilahZY_(M3O z4AQAuT?H(KeoU?>yn@G1UE}&of#VU=Gn1i}fViFA90=ZL-6~$D`@^RHuo029lSE*$ zZX!i@q<>cEV}?m+zDo_RYK+P@^v2=8eERXPEYi~K$V3^(8v9xN&jcO=9ZA&|rskn5 zdVFX9PMnXv?IVKER7HRXJ-Ir!!O^0>EZG7g1Sq5Y7c>*j|D}(LT(pEdeyr^rn0f%Z z|L)<;*8$$^-?h~FOFJTBWp`LuHR{YlU1lS~XXJy$zr-kARLL^C&7VQ(?27&nonGL_ z-7cDf(mr0S?bKVT4fS|%hl-fIYWOczo+WTRQfTw7(|X%@ zuBx%TW1~|V9K~Cx&@A(+&>zOV%`x186ggZHL*$y4TeYBNK=_yH!fX(X*EHXxtA1vA z*O0ryeL8)l)qF$V2;UbgDg1+1{fGjU3m2u!bd(xOOshxSw}Wj>apm!QL2fwwWVTN4 z*c8m5v&dm%G4$AV*I$Ksz}ZEfl3*bIu{QBKO!dOwDW)O&kye+Do2|M3kA3~G7a#&y zHB7h;_naxz=dO_VZ5$3qH8uFi0CE0^EFuA~(!vSfsGs|*ZD%dv3PqsrY_6r~>P{gi zWiSlEHoyoFTI!2Wzt2kW^!$APe~Izub|JJR7?8^|?Q(e8NlJtzOgRoSpIQHtSpM_B z_9ktC^H_WIuBp~v`aClQ230(r$KE+jPs?-%qfOccS!bQmIm*S#fB8s}i%A%Y0x~On zS`lV1>k#YFAK)*9a=atjLwp~UXwKh~xq`%4+i;Xv3tH~9#h z7AD?+K=4H=<*r&*V@eBDekGEL6d!to`^(wC+`(Mt%8V~`07iSV#l11Z8igJo|L+X` zV%hKD|Ht3No;@D9ypS-zTJ*jiO7z-RKd&MynZgN+WZJ)sX(H^OuVX>%PaFQ- zYayfJv3-xVg`3tGsK^)P{Gyj+%)0c-yGlX$a~+irxh~Xu>F<^jsX2!s{pKf%40BGJ z7EDcX7{!#@u;#_aUzyz=94cCBoX$NQm0iP;E#=+M82yO&FO5Kp_Z$Xy?hy#FXlzZP zm^-;xxB{Vt{EoaMxs-paUjaz2CXAViVg>xga(#fTFOmWSUm{?klum61Q6pnftm#fT z3jh9{3Ga%G(F;^9hEmT_f7Ie{3rQP;e4nu~JxBU4cl?!(eQxX6aDAagy~9jF?N#f3 z0@ATEr>AR+<8ZiT*R$KlD5+^+YZdWVO1Y*laPx{@(-hh|0J(qFe^xb)=GLkNa_6fI zhkZvLNCuY4vf3YH>qb!V)}6yw)iEH&HnfYwFRkI#`j%?c7{c{fMkk%*gbNSJfd>&O z#`6QzOAn_}=|^5r)l)7k%>(Lv2TE38oZDob`&@g}<=iQ(+*G87_zj4~T{8pjx-*%! 
zxxQt3y&l=Or_-5c-lk*m0sE~t`7FRu`C+c!D_FlDy)?JW@m8@hUb71f=^swj7!Rtm ztNqCv{%XwMGRWldre~oQZrAypRya#*^Lt}re#s!UoX_6J&4|EU4FbwFr3cE0~x@`3zNu&mlA6Iqj27=8RLa>1s(t7fp?v)j%iP-xUB$j(* z)+8MV4%Wf;Ck|RCFm%b5fa6_()%n-+Gw%p>t7<9ZK?0-OK9wJ8WwNiBh+V+@%6Hf1 zy#*iVB$*@9#eTFuGWfvw8LWX}UlS1Nr?5>s*6)Z7B~9^MbLXs+j9E*W*!f0-gV9w) z8|db#^P~ipBS1dSzoZ3AJW2xFe5+YFVstP7)T?PBgXUW@ZUTOSiA-S(c@#^5TF#_O zUpQW!L~9h@jlz_#Ey)U1qfu4c2bn)4;mX{y6r$EcYLf*}NhF*KsZ@sx_!u0_+Gxg+ zphTQZeB<@gNg$*5w5;pyvvy5_&IJov)iHC_aKz!j_{J;?DHU>}XLVilLXu;MVzt7m zr@Wn4$O^JNHOD?cO5}} zM#elNw8nO0Ai_2uq+d%{RYBX_H}q2rT?mC)yT8ng=~|xS(JB{`?`a;xGW31f zFQoOvaj)PecssW6R{blrpGZC{Bu&Lp0EXbS1a>)T9aM%pzKI~=3Tu~ zM?g()HpskMTbR7}6O=?~iJewT2}fd+0vK}E)qBu>CkuGft0|+t7j?d69(5Ut_mZws zGNnA$|0$Y%Cp?7HpfVT{LsvshC46}gUodPje<3Q&R{61Z!@5OS%ug$T;IUb)MR3SoH@;vgQED zMi@%@a9<7hOi-l#dRZs-xi;|zLlz&Fp^LI)H5@)xTO9-Y6p(8WWibD7*1|VfF->8( zD}~1v({F<(y1zuVm1y{Wo_~En%NXEOY#Hv6s(RI~RV3(#`d-x98b4l%lV-dUEgN{>!rPK?aN8JGwsh8$$s9j*Fdj$ zo3GaKn2bEht%zYq`A%%jUAj}-l`Rp7VVd3@^ejL=UIih^BJU^`KE+-;iJ^eIK>wJi z`$GXsX-#l>6T?Ej#L<}x6`Si~Xj5_{gx)H|b~)_uCVd9C zR($&g+8sHEY$ZTUNLf3LAGT$cXHUC4aMKhkXljL>Tq;>vnXmfH&~1Mst^Hyv$18hi zriz|S$%J2 z8k9OJnWKsv1E*~@Sjj>%Q$4UOQ;q@BRA{NGR{=bS9C*$K#goR_K9*frUVg&0>VBc{ zK?@Cin{QIN(@lJG;Tye{f%%Tl4yi`TC5W4c{6sr!13G@1KB{spQ=O8~ZBx(Tg%{^^ z-N_BX7q-&xvs#r)!+(AVGAcJ17)X7ddYhd!w^UvsgE(}F=XyzI_cC{B|8P^k?igEc zj;65DmisVp()9=rOYsZm-D3o4H`!fC)463XjhVXJh) z5s&ai+rH(Y+hKy1`C|sz8u4h}m5Nby*IGE}Q)}{Dm6wlz$st>Gx%L;zu*|~LQUlDu zO7KFSGB!G3V@mAOF3|`yBX9$q;xyj-&7=856D*eH#VT}Y_PzNP-qNaL%o{#e7l&n9 zd3@-*-dwW-SH?M^HrK;xy;B!xsFt#H_(me84qU4IZRplNqZiQ z2BeFq77}5v#HNPh*O3x>JJO(Hmw? zB-Wt&RLjDzsX!}j{_3aR4&f9Zx$Ci^u{nZL9cfG1@nPwhIE!%~&>KAAi~YWR50W0k zAuU4(7VV7C<&1G~Fn0$ucFR+2eBW+F9*OXtRNGt^9*GX5YHVsX3ijgALu}YIm3-I)<@b0g6>-%< zaBJX!$H6wWLpb_ObIAX+t?RQEbDwXD1}#PhY)FS8Y%C}3tA;%#3q+lUeq;mdM-Zu`r%AkWzV^v=(|B*t=t{kz}) z_}Gh}U-7ue4hyP(CR4x51JHLMpgAmLE$YnuN19tY)oS;o$bOuOaho;$1@_xVnb{2% zl3HLxDx(6vLkH`5?F48Uxqh?nos*9#+ecsUq_ughHNGRoHiD4b5X@jl5nqSW7ob4o68YkyHx zFE_YKx?Xp3$C;LY>?H*yktfPcB&=%@g}A%(FP|RBK{xe9O5= zu0oIss+E{mjJo%tu@Y5mVI4UxX9d)N=kbIY%LXCOS+d~qp8nRdSM^A{0dZMWHr`F_ z+M;mqDk>0=@14O+Dw$6gJ_j>(p+$u6vqW8eS$>Wm`~!J@ac-k-y|(MqNUm6S!DxuJ ze&#l@wsFUScEW6k6C=LHjP_V!>sH^=ui6^1q}bsdXT_}w$&Ik&$Fe8@L&5?c|GI5f z(RDv?&Fj0IF~zC%|A3(){Ut$b)U)0kS8)3&!bvML!e&ODJxx^LEZ2jdwkYC!Vsn0; zW~7R`R^J;@QZqImh-zZVobl)Fns|w zSv84^_8d-L9o*pPhJc8~nbE(!*-B`31Y4#1wa#(qJ$vAheFplf5+X%KIV-Ww!`~ZX zYDT`(H}n|%op$2`BUb7aOmAGd;&T9Iq=yDZNjc4FLhV|JV0p-UZC{BgFF&})ZuY~T zYs)V`$g(_OZ_?X1p=H+Un;TX4a?XJfB5tp(`MAh)q;^4H&&3FkZfTTU;csqHC-Aw= zk5b&`zqYR}3kZsKOT^j23P()`d(Ylg_#@%)So&$ zzuRf~)Np}%M)GHc4lD`(19=F(4|+ZH<@RZ=_0m9{kwl8>$p~u7Mg{e9Io*;s`}Kw; z>K9^z!<{?lSKOA2&l%j# zXvb89t8bS;)6cCSt<`xg>*oG;5v%S9?rL>vfuN^G(zzP={j={Q*_l4a2zz?K^ccNH znG&e6jp88PgZI=c3>L$^>K@#NHxl%Ko~^Zf<{@4>26i%qJRK$P_KpD8l_Hp#q9vFw z{vWGg{TC>}F)$Yi7^Y6TfM*3D6~^e*=1=E6P{BY=5WFjxj(;90n;)1&{mAMGhDmEG)iYB? 
zdb9VZI24dT&($k@X$9zYeUO0bLWZ6TNHyBx8RfIR{7g zswcVkDLycWyeRu6O@RiCSnQ641}|q>A0(Vuo#P;0MJzWUF@0vhfv@QF-$m)nI+Xsq z6~4?^E1C8f`>YKjgW+ciYtrLqKQ)!v_C{l7nOU9$zuA{yVQ=f=oF``UmHPskhz#VM zH^H+@G37y9!#$@|>Yljv`yDWS2rME?92D~*3>EvBN+ix+rsqIm%;FQXHw%D?6W*?f zhuwwxTl+SG8Dm;4}wkulc$<=h~$UQyI=NwN@iqX-w4 z;g^Vhg?Unor&#aV=ba)6=iUgInjkm2hB%CC3B&RUCp;b^vzrKbS1yq$Oj=~(Vx@M{ zFONvN&Oau*;bG+PAbl$1eBg3%`DK&pdQ@h$o;>@NB!i`Cei@5m=9Z@nbtT*+J~n_2#PSCtX0gJB67n2o$N za2&h=J5{V?Oso6&lIvuv)9~x}-Jc>dOI!rX;pzRA6oSF-z(aiJKPLA5Nd&Ev&3Ze~%fN+!kHD&3eS2N{mfiwBb8L7|vAtfT&(mw+lh zAWH~m-Lx*iT4L%=scW(KBJz~f$*pgYXL?jDce+nMUBmi)N0P#-z3Rn-;6r-t2%w)G z_GER3%c$?tOdl$|o=&d@+kEJ0cIU%wHDCe*tLsrpj`{Y(Z&>?AjdP zjKOSO(rGBt2@rCIhAO2ur=6@aBf1aN1omTrVS(xaFrL@5{zo3W?-N;h7!NEcTEjB5 zefIf_C_rc;^mk@#a$inGGE>}`Iqf*(Akn0(_(m9zo^GAC!_9{{zUGGJfUnCkqF9-= z*M)^nAD9I0b2DI7V)S!cpFhoS+}o)Asw{h*1~aENU+Pf$^@0Su*@>6xe_L7TU)r5i zQg-xDR!+a`(*_16_=ld_4zKN$MN*ofj;4>iFSpPjalKCW%jxzTIhLS;E?y)XYx!S- z!TM;W%~cG8g%As^2ANDKZC5NP0c{ISL$06Q*UuIi!Wl^vf5tg|!&jx0l-3 zGlM0~npfkj!IYq0XmjD>A?xb9$Aafy;bV&zQcuI~-XxWcNyaTbql1)}QX;5{J(V2c z^|^4%91lM_Dv?n@IT@!5{osm~SgLv&hwj&mViL&g`UnJ+pqn;eA16*xN9oIfp?310A zHsoF{!E;-U;>#Ne%M^~!fHPdBd!_Zp&>QLJ#%_pSS=(h`@|bFsE)@yRjp9XmNT=}ya!>TDmL@N#~P{YGg#i0WO$xz2#1^pD3a zsc+DyrYofr#g_zOKOM_WPG6i;1@(s1|1tK}aaCZ*IKY;WN3yq2+k8FQ4Je+`2LUxe#XJGLJSzO`b<;~IRV&%Yg-3;&ku0Tm4{Xnh!zpd@p$7qN`BUta&bnb98 zQv*@k_Q>(+Xnr=M2mQ5fzFla8*T-?L`-hNaDAVGg<2=xJGgV_;o2OT~u*_IoQToso zXl_|LR5J=yN$Lh#v4q^K<}&&g2ejvUMZivP#op5(j1*LpCw13FXvjqh zfE2iLRV@FRW0NyDyoC%OpuY_e#p*O9ptC9A=dJKlHUhTXfVlo#4BBXh{NeBU5Cg%B zjeU0rzWSQ?#}$ATo(jU@{3n??j$^t*EFDcrKdh zl!~2{AZ#N!?E4){EIrbyZ9y|p|Nhk26a;H50HG?4kwxa03-niJpv@?AKu<RinYLs^o=`6>~VId^S znmU8nBnZCXPBG9Zwo;Q)`YF~4qHc3y`W!hnr|sZKLxTvITfW@ep4+u9MP?j0UB6ue z+Y%Esz!g>{aT0?3~PVwT$5D*Iu>+(3xMG&cyBCoRXRP{LmRw1YS9S+kVA=rA+=3 zbZ8N0bp{X!L_MwFg&MX ziLvuF7Je6MC?9>M3gfl;2+(88e+)lN_&o05DmHR6y5e)EJG+&L?Qz~5(m^W!wmCa_W7gzBW~WV!eCP*p)CJrZGpX3wZs71D+r>bXHJ~ z;>)6{;^~6Ip6y0$LODw=#_cyerDT8~Q~Mk&m$0FZ;77CGtF=g=Vx4bgbK9+d*s9~J zfn(#Ja%XCTHNgHkU)b{vXMy_nvqtMxZN+THOkWZSq0iQohuu8z`4tGAz4`eJ)SZP8 z-QZRb_d@CNR-BhoB8Rh!ja(bbdK?!{e4JzZ6GXma_mw9{`p4pNTWegQRb5^sIU9*I zUwMB3s>o6q&O~#C>FrdN2CPC`Gk}cFYh=S4qMNY7t+e@v)9#lQNSP<1aY4ohnq$XHgf!gT}$Q~E1o^FUx<5`mc9|tWz%C0<$-45#47sQ98j5Lu|N<;G*#&*2Tw`dVxqP$CVXDeG)#W2E;69Rg!7#`Oqia za!X}N67wWnAq~$jQ*8i`I?&}9JOI5k)Yz>r>XuQF9A`KM5B^{0I2I(9E4Ln~Q|;ryCZkh1hbc<~W-rHS$!+Ak9K|2E!iC*>@fE z=c;@C9v!z%^rTzTdb-7`{s`MSo27;}o%c1u$vNPWN;0Vm9B`Sq=}m+URo>G;xM}~G zuT?`7AW1UTq$gKk^=!|Nwdz!kj06UVs$7|i{)iy7o96w-P1@~)fXozymaYua*Q;++ zT@oo=HZ^ve+gT1fNA4WsRm5#e5}(KAYW7g$BzE3Iz3gU5B330GD2g!3={JXeS`3(d z;_)y9&nZ!@_gM39F#G4o_=W?pa29lX)yN3-f z$iaT@{KlWf4Lq@hd+D-8LT%&xx;~&4&&i=%o|G&|$U{2vIP2zIRh{-m^v3ZUq=n?< z6Dr4M*C*SBV3ug1zAd-mruu2)qPmVR<*L`JcJilBGL15wWsaw%B`zxZ7KeQxS7^yrkm4xasG{8G~krtMt{H--vgJ^?lZvt-^azh`WJ`X->< zh3WrVW)-|-wmu053YrTO);syx&*%^xWAcDaq2TP}6x$snN_2$%D~!}$JSS1r0wHg4 zZ8=Ig4z5gfAH0)G&&NS(xY*V*TCL4w!)-xm6pWkGObdx;HVq{U{wmMc3$6P{cfZ{@3&PSOE)^R?0}B$cJg3Vz$6j`lK2v`EH0)ObOk+xkwF z^8JmcF)(|&i79{TCIae*e5JjKq;N0GtP$;hhsbYYk-|egIETH@E4CuwOc#swvm89R zfZ{LXW`s@t29AP=vp1Y)M+C?7_Y#`f0hj&G)b@s1g>)u*Z7t4+I;W#%yZK%pv4Xa$ ztYVH4xp6mk2ea-Mv%{e^<+JAZq^+J1*y;{?FD04<`SPs2RcCi|}L^gLT2NyCi z?sblNwOGwo8a9Hppk4#xrx{hX6Tq|q9JtwX!5BiUPi8-wdp7HpcRB=w6tGG_o8Z50 zyzCe6p;}yBZKq_t!?EbCP2NAEc}(#iXi0N*u>-OrmhZAi=CB-iKv_T|ks{wS8v!-A zfeyY(D8*qjW5B~2n{qd+8LQ6q3DL=XT`Bww3YdGH$c{f{Ct~MY= zjf1DE*n)IQ1Y6T+oy!0Y8dUC)84)RoEpSX3zy3A_%x>vFD~aLPc9&PGUjk&_Z}%3% zhe#1}6 ze|YQ0;}EmfvF+Pz^Tmm%N&`&4_OGpwv53Y$=vLqi7C^ZvkdsWF=2C}$#(5!W-NPxc 
zyi95uGk~OcpW(XFu&C~}N-_`{ANhE#WyLGDl}B1mm;-WlJugx5Sef=o)eHX|3j-fA zdy}lp7fLeipMbh2cwyUl*s=~`Xbxs~82lN`*;g2QWp+N6;EbXJXS90*?o~tNm;!)&ke;C-PWEBX9 zyDlP}@TnJ$k_$a;q~)U`qX2&<1J#f`-HfL&?V4R|dJ4CV%6fF<_nVPs+nBbDIahKE zJ_2YzlfZxxv7v-1EH3XNYJz_DD(<60wdFe4C4R!ejezYSvNOlO19}bg7)xd=z)qg< z+z_l|ogxWxM95_H3P@da16S1miMJ|#54Wic2R(t$)&@buENUnDQ6nhUCs$IFvWl}s z)8cj-Bb9h=R)+h&W516Z;NS22VYT-{MSL2t>XrlDc^MkplSbgCX*;k?TsI|hq`&w3OVrCf6uZw7<48;i%n))|o241wH_=Y~8BwgsWFGK3bqgWqJGDwY_*5wfws z;$yv;DtEyyTs;Js1*5@-Y28idhZUt9z@!VX&37t?2UZ%SB6U?lbmN}2O&%^ep0?;M zcw>l}0*ILmj@ID%h=JHT+x^Vv!4G2!bdI+@^&NG+ng?M98vIE#A5#3{{DnMN#!Pis zr576k%9IRfj0T+-&Hsna(OtMC+8^?;>%h3ie~PZftnc2Ucy@LscdKcFE26}_od^4Z z+Nbhh;1X}+FG}Sf6ax>=|h z(@AFw5;z|r3DG7C?~_I4e(2NP+*1C|D|BU`P0Vq2B#V+clxRz=cEsP<`!MxhC>ap| z=*>BCYV`N!Aq#@TGY4mWVRh1XKH>-x9ClKd&(6#O*@q7om>u&Fd^#P?)}A^+Ysg%5 z_BNu&c82Sw>Sh5YA$O&7=W_kCY~yR21OBDfX7J;?6Kz;)4`vDA7~iT{xNuhnMqVXE`f25ZoS5kY z%1c}~b6g$g*b=0*>or*sCJ+|=!svc_?eaZ-xY;Oh`L=`!=A>EyDa_hQ8c~;7#o@f> z1#sfCfLC}nG_PHN3W$}W&r{PK>-R}CyDL31bDJG2K<4E}=%`O#4NgkChcnqQ^s%YX z@K?^3LzfdeY+Zcz-fQz6oD3eOuE|K?<(%Vr*WlF-YnSGZWZWs$##Zn=&_014Z7r0o zbWMMe0!HZ{L{r$EYjM2`W2Dc|{vr>*-wBjv%`as7+=j-kikcACk@?9%u(x%e{>pZVK!NP=uQB^;M z9yhHWCRFHSo0rHX*%0*DT*QS8WgwZ;XgdRLbGGz_S6YP)mL{*JjqT=ECZ@+TVFH)$ zghUmOtK&|5vSdZW0(KPs=KuLwg3&+!eU3tY!%)Yhlp=?Jl5>OOxpEa95^_OS8(?J! zUC~=lJNNaj#x3kpX&(N(W9iJ$`%GnT<8;hN=qWIHs&sLwV|^maXzg^@`&49Ys^sYI z>N3MnoS#`^akY-fAwz*f!Go9ryU>drf7#kEKo>ONB_kGC4Y4YtR(T-3sMfz6bQEka zbtgfYg(e_OG4Jbla5fw@Awsbv^EMWI%|vQQKcKH(5bv~%akoNc3J+QdsG5Yp@Dv+e zs>zRdsdNDP39Z^W|3k{=I7Q=X zoD2ora=a>>Hff6Llvx|CcLHg0m(NI_a&db&Zu{NP+;;%lDDX5u^l2P;yU`s5L4sbM zvhgQkC)pLe?#BqVG%P~apk-n#HGJ;u2f_@Nun24q<)u9!-yZ-Yg*(zEY>r^ED0SJR z{Xiy)+rJgKN7o%@UDLN!C^jEOH_Q-v%^3fD`|=Eb7-65MpR9rlI$1Q?Ai!z4+yKKD zWXwZMrwG$b+5NTGepUZ&5KNa zaRtAMeP>#WGz~2!w&vLkqEnuf=+d!Bu~>DhaffvO600$;fAVjt<=^BjUu`6{W-pnm z&F0GrCJmHMzB8yQM(3 zV-J&M3T&#Fba{hrZ4x2*UyOw|ChZkxnI?V}m`k z)aQcH;oCcO+pLvEU~bQ{p}#stI1f-n+HFNtvbNOIdg4CBVkGX?)K)3MscHfoZRK*o zLo)0%G}^HXWC+tyIpC8>pq1}zb$xF0)XO#`;Y#Wxfrf_Lz31|w32igO92THFAI-0~ zCGSp0y<*E}oln2TPyt=!+}M6tmVo9k<-Qp_=F^VJzp2V%K$*Ka7?g@Em}4-*P!pCF znL1jG$nV!FiG3vozm5Sb=3W%+57+LRHe~l30EXnsZ~a#F-J0Mu5Zr9mKUI(l!CWrV z+b-#^L}|wLIOV%*7sSRD^kt8%j@3A>uEMph1Q63X9O!{^Ob3tbnrW^+?=o#6Jbz=Q zBfS{qdK@+cln+-_?EHWmGJ$-p!(g*3;8l-2;cXH}cG)h+Y)abU8EnkGF7;ojx;Nn8 z1&BXKyg-t|Hr#<0Ipb#U6>M^0{RFh+%MBGo;}+{-F4^mcbI=X5D(Ox?uo8b+w4VVm z5$EMav!m35%d2X!Fcn2fqa_wZ$Ck}ByrPMAIICZs`k?^R(+xR~dTSURYtILjkE&M$ zl?PbzvMC%4SR{^yS((%h)i_AJS>xXOuw$PzOjvf=s{&?+PCl3JURw!#G={7j1m&fK zW+6fsewk)j+$AbV^Bc(T4NX~9);1u+gs=taX}@?ENxMs2v*e9{KM7^4eyUf~(t1L! z{$48pBRt@7@ux&E-inyUVi(TlU3mj|O49OKK{I~(Q%|2Pey-GQVILcH47Px`0L|Ry zJa;wBuerh-_-qKABqBW$k)GjOJBYLE8dpCP&4rN%Bq{FMiqMfRNcXstvaN2pdM$sl zN@LeWugYp0&(K7N@pG^B4VEXE>%s3gr_5X}I)Jz~p7G-Oa4JD?He`AXbMzOZsgq7dAH?ygXj+Ogj%&tK>kre2&uNnD99UUj7 zX*zN=->!#yC!jJ|IZjIN7U+i+tHZ-`#L-+bT@t``w{}~sTy8HLN^S@{ge@UVU~W|u;np*IgwTYyThm@M7*G&`YT6~hLFwF{xUEX#d=K!YIo}}AGACP zmiCY}+c%O|(lCSk7`78G59DSNqM1sBy{aUXw^g3uE`7^uEk7Ac%SbfqpY zbCu9NMwh|$W1PFFk}b3Nad~R$uWP+g{YlHE{@IuS?SkUV)p&|)Lqz`p({_VuDWEmi zCHLU30d{VomdY~$DN3!Yy=s>R8TZs8JAN}yEj)-%l9UDuVMRh;oE>+sUT`H^kto(Kpu]SlX(+ zz=?yP#=Xi7Xf@3`y}@n1=rI}M>>o@%;dBBY$sRXBs9T=cdSAAsE5(M2^2&<-;#n6J4%#HT|PYH`g~*1V@VzWY$q zCrvi5qDtZwBZ`UA&k*JBs5n+#{|vvhXEEq#KzGqPRF0L}0cruX)DCXFLPCz^@?Gv> zJTY&zL}bnOS>l6f89Tt9GQFwTOM{|~SGBB?0}K#5<_4T06(SWl@;pA9O2LDT@2Aqg zqYL5k5UBD?R_2i_v~xY{L4mt|6HOBoq>{bN=q*HeuPkf$m^2}lF;H*Z9#h1Z*(5B)Y4?C7W8m~l#r z4v~I~EaDzXHHPx`3lh5On{S2W_tPb9eI<9BKQ4H9jIfpR?Z}%%&%fh3z7&E9>^8q! 
z6S}=MHQ}7A6iUrady6I+q{@&;%&OSG_7Ywpo>J?u{@XOpe%v{Xi1DdrZP4m40S--k z9!1^gQ>uZ|%iB*Cpe&Ac0{r5)nO^!SSY_@T;ai#!8v>X@m7-(0Qq+?Ns!{AV1>ZGh z-FSU!%*#ysV@r+g&me2)M*`zDpqXc5HtSxKBd@My9Z46IF=ZsvK${9Su@6;4@i14X zF`d4ndt96^X>$xEd2ECm86EJ6rZHR#k<5Hi@!2)+Dy_wHjtj13Y@}D})FBupQ#)r% zkhR+REb*`^?%p)Yb~gvn3nz4CHj^>^`LB~k&~VX#2TzYT)z4MjGM9rROWvs&lz6>= z>c&=Ay+2)Z+96_oE1@vzk=GfG=hIV?9K(gz!j#J(LTf9C|9HkU`$qTNewO~+88#AG zV8QdIT4z(My%_r0Fx0AW(#nmckfm=%x<*5CXE00uRf3=-w!;$1ERqeCbvUZZ~aC( zum6FaQ4z8^#H}V`FR0OpIoyucGqN=U^#VJ_%UlX>&LrYnl6R8sl2|tu#Upnf5o5L^ z=L|gFzS3QpO4i3D=z5U{g#%2u0oeIN>M+zZE#h#kvaYaA668g6IiAb#7L#!jO+l03|)zI>M62>UkEKYl@K ziIU;$a}<*TJ8^d}d`<#mTzyxJ7x&%YV0z3LG$JaVU`DVn5U64YR*LSimfsp*Zl^GlANA zwzm7s_xRv7Oh4XXeR9D3n>*9if}H})Q8)VeYoZvzq^bmn2D^5#zkYmfBJof2%m438 z@Bd6e{|%{63Yfoa-!Qv{dj~lK=e^Bc{G?cAQE*5!kH?xsgQ_w}vD+&CncnVMA9v4N zOfgZUFau2XX_6HihlMJXSv#BXrslkG?Rp2529-_n;sL?U)7);xcUcsVthd)opxUzd z#`q6{&42suo#n1QhvbX!!cb*%AJwKOhdfmy2KS&i7G8GHd1 z0%yd;fP^_(8Da$TsQKID4_2t^jO**fQQp-q+7g^i7IGEJq)cNimfT}G_}(k zMc3L1q^21Dc%zX&i3&-%A5A#x6El=sJEK}Wz9@8CyyrK%#-_9J!7NQmkL<@4;TpVg ztIt$f7FH3cMsjF2tLA6W(sDWYtM!$VYbyjM2@3`F`S}mX`bSNF=?@?AOr4OOdWdxY zh0-0E1rN(Lk`_GqE;d&G`phPe8l+rxf0V~&u9+s~)n%f=16weRm^YYlZ;kl=-iyJd zLJ6_$Mn;nLv69mFu}G{<-ehhsB&76oy7MM7+qp2)xy5Du41$=*qPg&$<0`p^pN?}c zviZ#U@O>DlEtPH*a_`+aybL8%8$ACcYjqL%mZOKah=DMrQ*YQ;Qh=!eui)bJ0}z#2 zcR{E@(#A}T_n1iR>V1weCZpbBUdSZI#XpZ zhrpz{boi-bRmtS3>5l@IinBfR!yaeQY*Ct8IDQd@5s^1}{ygB#!|gpV9e!TByBO)f z#TXCyvg5#B?xniZ?D|H$Bx#+KJf~e9lfhzOqK+6`5Ol#qtXSm>ktqw5Z$58^dX{76 zc4Rx4`P94Z#_sosRpP~%D}(@7hwf0;>Bpl!RU=&_ciCBtL=CVER^>7x|MPJCcRczH zX`kkY3iDQw&TaJ4rXUAow!l#au2$&aS(0XnxN=hP$@?2t{v17ht-II_+?Z*v)x0cM z^546e+6j&JzucLz*0?J%isDMlJYEBwfsULc`5nJE;OZXZr~JjLM`CBStUR7MGAJoA zG>ZLfl%iRdleJcN6&4meEun-0dOqRsT{eZF!1e})r}SDQTkHyU$RL}AX6WM|htTWV zk`edToXPELsb2pi?bF7_|zvc97vKiSMb^>yu#|~VNXHB8LGC{^$fg{G-eR7espUh{+(vRJxQ;;V4 z>$?Pbx{2ObsU932Jp)JNv`tpD7h95$AO8pFAwz@G7Az^W4pNbW!B&6V~ z)hWRF_K|sK0$`A~TcnHglapmZlVi$e!}_mdiu7d>$xV0$o-|&YHzcDqL1EZ2f{pBS zs(QSt7Id1QWrB`_-(RySbWmVJMkHs?byklSm+m(*EvL}=z?)L5RK{0|^bSt*7T)Ss zm}8q4`%Pv^3J^|zO|w>^@|nZNT=i`B3iE7mYmnKKEQRI)U_Ad(eq?Esj4-S}#nOI# z44cOtnw8vQCx2kt&v4EP<h*ZIu*M=a_k*Z?b3D|pcJEsghEN~h}7i|&I6P8ma`-G3j&RfuF;#U;KGq~rKrvpFY7lU7-^eQ2xG)c7Gj z(Gx%x`+PKvfgF;W&-HK#7hT^}wM#Zuhc2?Kw9Ii1Tt(^x@H8GCX%jY04vGyRsR~?3YUtFQ6)N0f|3Cg7+Yz_XGJH`GJe}p~JTyY-+ zmx}#{LQJy!Dg0wukXqc>3=Gj39jx4(}^D> zs@khU$>1OWH>uwxJaM&+V-5;*Z(w@*Ofy|72=#r{aZ=5P;LY-zRg+O?ALs2WM+S>T z8aeTL$4mRH4c=i(l|vTHd#};i%q$xq_>R%f)xwg3EXmlirL@pZ=)M!VuM%VbRt-2s*bWp?HwcZtJ|)d#u3vLUvQkt7(eJ$9997E%YP;HO1(zSitb)h-_C;NAi|mc>vtpGC9_|z*Xh1gS@W9hu?mmv&O0UGF z;agn-)Am(8>X0NsC*J4ZGw(EfWIjsRDYwd#q3ew=l9AJUc^KnZ2wt$};NEnA^*L23 zPOS&t)oz+oc{1VR{61x8tnB@^q!RVV5ow*%x=_8|kZqwas)7^fYMSTYv7b4^(qZu3 zoI7+93835~bp_2zcf+kd7I2%16;hZ7BXNW?hDV*U*La!9|otJa*Pv?w;PrKQaHXY5*G9JpC@hflO z73~~avQYfr3#tAKl-(!)k^dXupw8w7E=r0D`QQzo^MUpfptKj=2jiI&l_l1l(CFQrmMf%+Z4j&jB#KQAtid9%>J9sTrEXl=Z{IxYWhR0D~%7~ zJ{|m%2dD$op+Vvn)C6h|eN{h=)&z{#UjI;J=uX;f#0Z2F4}%v5;ht8_ejE=ZVEQqL zFUiXx%w;;_c_^U?G6$x4zEH@~7U%m09CE@yqCkg`5|vr0q=RDZoJ!QH4AxQ8QjO7z zu~Jq1*@$4&3tsvSN}!-{OrWw9HgPLn4nEjrRjB7z^HO`jDV=zdqQaFlMA5(u(- z{Dn!pc9r|Gex8T~AL21va1?z|syLLbWF|CPlkePFZ=s}&5x$`bK4dS;OD_Wun0Vrt z4hJu$HV55BU6U4YFKH4@t()$vv!W`GTu0mI8>b*c-+;{K#S6EqlMF%o!v%(wsXSUF zaXyU)SnqGFKFbA*V>V!FU4Fi*^U!^ux__I?ohpL~&X&1W;8SbVk5^4h`hF(b7eVC} zQi?zR#3f~sn(1a{6zX_A_$B;xSgG7H0)6d{D zEVV)r`O#kF2M*blp(>)+-#G&=+6yLS3f8>X6~$;VSsv)H$=~qT$O~E?r1xyMvP*vF zar|KQhDc2Ec=rr?myqdr3!h>S090M}Ur|?64lA8vgP?|6;njI6d-)$ENui29f zi(&;T@5g5@AW~R#)r=e@`!413u5*s_6N|b+kK?eh4A;z;{>nsHB 
z{yGD^n08KRB>KpNmyiv^rl+YPT|sJ}0*9+MA`Y9tVCNRkh2qmro7U8#`Lv!XmG@(t z=4Uz9=lEGh&|4yvwk=KW4ZGC9jwAJA@?45Js05^8J}P&zig*`P<5%5MIRadO#Q!ub z@{u52u?jhf<@mMg&|x0GPDO%Qgc2P&??dbPhNE8E^TigijAAqr(eTTx{fL*hin{|a z@w;6YZeMp5VyhC~+4oA)BxSw}VO;8S_+ev+Io_<-?2mnaQaWt7bH^p=SmV>EpOI@+ zi`MOpo=05e@o63Ew|}|A@IOKQ0~5-TM^?mi@eqwxoEP|v6k_w}!C^MP1H}hnKm6~` zDnv6EZo7UjAq5nlkrEF3GVA-L0+O?{-^5-i@_L}5$;Ne2g97GsJUjE>!&|J%_;vGg z9819>K;B)oU(XQr=||N1quR!yc%b1)rHq+~T-*wF&pml4WEFHUa_*GI;`W9e`RivG z2Q_Ek@Sz8^f7?=g@3pAB2)t5i$TM6jI^+0trvj1HXB#uhr^S-TP%zoM$^Fo7?VUhg z%cq@S<6BCQeOxW(M==)x1#i;%>)PRT&?I|PI@M?>BA1THPJdOjdD!d>Yw)afD7mUK zZ5hQ=3fgJbenVrf(vS^Q?<3E(1QcLIjr$Jg#*42qdDBo0Q)#AU-65V3V=T-o&#IFr z=T5hVZlxPq89wIwUGJ+T=6&`fj*4z-riX`0y3(mRQ;YiCg|;+1+*!kg!bcya2O8bh z?mj$Z1;2^E5ng;~&BAMc{W{Y^&v(!*TQs}I=5FK;xad^Z-vEKTHQ)B_)8ha=COTvvRzv!WoQW zqRRE~3TqLXK~Iu|r>%VQftwGujDj+qNdTk~E{e#8ieeu9of=VvSNgRYFm4c}VQwqKN{j(lf4Q0$dFPIhXEhR7oVOkcsuW@tpzj)UYpsUnAM*#nL1^3g;mW&fK4 z^;C%!%Oso9LWXCTMFfln*gk}(Sz41PV->HBX1y$-bR>83Z_JnQ7oU!OA6vk<_d-V4 zqkq9X)))U`Ghm|XaPBemFi}rOY zBcWkc@4FX*cQ7?%XD|F^=dM6+LjgFJ#6FcbSzC3X-OWz;c(MDc&JYt1^ADs9r6r1( z(C@dzy?uU+uIhp&3g?+2I`p~3>tF7q8~S1~{KQPG*twnQ(he$1f{%n)k5k3MAi4WV zJ7c6{(aku+io)(-S1pUR(#N*G)C%A}mS;dkAL%pX)fw=rGhOb>g#W!?F{XP6Ad+U! zN(i8p2uI|S#b%16=QV^Xn$o6BjAC~&usJ24)vD2{TMISL-VYFn&CTf9K9A}>X;!m|-`{AI67BY47^fb%GbBgsAz$rlRJ@~+6nV4o?n$Y0!Wqm%LPsLpoPHwdSlqU3L1t<4aP3-7BGYF(??$3DK4 zuFN=&CPU>D{~v_!pCoWA`WXj74uc)Q7ZHIZ{Tx7to8s}rf4=x$+=ooe`Ix;@F@Iqe z5{G_N z)u=laEkpdXlkSt!_Sg9gV>Sj49iCl3R-RR6`ST)f-)}$*&j(PJWJVCRRsPdGj1*qs@%{>XuE$Z_HGo<>g#4tyO3+cHm5)_$fWeE?4U=iievqEzW*)vQdML@k**_XZ^4 z4-Lfisw!Ko7SaQ(KC5QgW8~lgzRv1LlEg@@J6bQ;T&`XM;6097hCR}Swl$F(Z%kHlS_<#QDgDr|G$s+kK{f?zPqFhXvv#Q$` z16XWu>h%BrQ~h?Q741lmM^Tkjizxu$tHMCD(#K68t5PxMLEG97pMSC7|9o-ZGi-7K zJfR(Jfa3lCet^9s;M}*4vavxfVF3iu%v5eqpr_Ph` zp`Fz$l=Oy$F@0|p8&s^Rvn%j}Yr7l_{je z2OxU^%BXMRll9-Ty$uS2>#c5a>xaVrV8GJ@f3XbrzX6}7%rz6%@$*)Uz(KZ|Z)D(T zv4K~BPDwqKW>j4%Xsp5o{wIC)pC=PWdctkT@k*6ck4dZoFM{a2piYORy$soTMg4D= zhreL-oju)^JWgQH#+wIf+SN()TY2rL2Gu`5z~5h%JmLq+vy!w(1kiP=VLWAm1DxM> z16m;E*=!%u&zqN=0%OkWy+XmT*-phbc@^Dg2wh8jMBLEKFSaTVX?B0eqd1giB|!RG8I5~9{7eJwS04(^ zSD_=P@2FTP-a{&ywc5(6vCY{Qxijh;gXxpMUO5#V0dHj&Se5!wL9wKhe3IpD9jU4P zcK4^y`CZT6(E3bHKut;LszJ4Wdg6ax3jaUf>gOa2tm9{`A2TVN6?@-V^J5yDa30b} z)p13KsNMcpx~t*l=}+;dR+vL7;h8Zj_|rLFt6FmX4r_?BnFZWCUCFPBxli zE!v(6>4M&C`de4JS2pLE;2Eyf|NQBHM#?lqj;uLd7MXV3V4``d+<*2g^w0B%ruJha z#d6HJ?>DsIDXN4qlbGhRp|PTskvkTH6!uKF@g}Ky(|(sX-od&>-@Ou`@NG6dTs1Kl)$TOLAPyY zy>@rHE@gQz)M3m-*YtCpcXu7LN{Qi)trs;rGfzJJeSSox>>xE%7WwsReZ6fzz%R=M z(+r7A9(#hyfAwt0p0vMbSg;q>;_gM4wpDdg`+-~U%QVK+zTa2-iu!BuIsiNRQ|`HB zYc5DfQ5jp@>Vzab`S^hYnGKL{&}e4JMT&;T#mrvPUL9d&YMKKzi+})w6p1NjAP0@2 zf;i{8L{}NX9F%5hu{Vo>ZIBHuu3U}dkP zhmJ^zPD|;=W0vR44q};4A4GJEZBTv16>r8VZW*^RP~ZWWFdR(!7kVE*Nm-`7lt z6$b08f3^}^%{9FrNO?gWhzVR9U4i``>opLkf7g@&)JC0jP2Cg9t0vJOPvtLbaS^FJ zsKPn=#)_{|3&BA_cC@*`{AKc7ZaMEHGc;OD@hqHN>CTMd>iNZ`ISQPIb9Ej{ya6Ym zvQO5(KUvc3i1;s`>;=W%^jhg^aBB<4X%wT=hawN-)a?aDe$TVL^xQ^opK1NkSmiJg z=0PQ%Fj3&Wmi5|*KJ`X%M$2Lm(il6$gxq$OBekM&-|MW%9 zsA++l`WFEo$SN-geN}q6dijEH;f%@xdd3N3Rv))-3~?WfGsXINt{uL(+anI_Ti-1D z_t!g^ZXR#R(1a>Gr%bF0R+Ag;L}qRlH0(`#X^fs?#hHsyO?OuHkJ} z-1K%|j0R7gd&XgT<3kHw;K0>gnUDc!U%ZS-4KY(aD{|K))X0kjGs}{Bo+Zi>L(Eb* zj9h7uv^X@k1Deuxy?Syz%lPC6A)CVRFE{d|Ph`G#3dQ@rWNu~we!~N2=H*wqA3J{j_QNnvNT@;IJi2K7p5NK>9I^ zUa;vE`v)i3jZR-0Pk4wWJ9oP>)ZT`S=#PMECe!zg^?u;>ZHm3~C%HVf@;MiuVu$}6 zh%(9XCl3zexDS7gW54BoP-|zlhE^T*S~xUkSKvlZkH~%=Id-uA9a=m@DQYkKpnT`R zJUSgwAqTNtImA0K7nyO(vSnZI00_MvV;0;5V%)O ztecDUh5PQ4fil!iQYLlLZuTqkX%y^S>uX)Ak8)f#T836=iEmQ5VBmr<4)ZtmisRvk 
z>H^I;q3D;686Y{N-3J&dwCB-?jfDp6;0NQlYQsA1d1p>P#;`NHR{Jb-p<~4Zj7fdT zoYDMBtgjcmww$C6T~wH1`szQCXdM!J9dweFKlU%fKD!iLF5Vgemthq>L!U$J$(?m6&iG>4C`o9`Ao|1NuFDp24bom zik)us2_4O5KRN?WHgF4XddlkD<0{#($E?}Yl6^!GJn0NG(~Ed(MDR#%#?>=b zODhYtVGQ~W6L(mF@QX%6efYJWXzeAcww@3&Fj(qy{>i6u-a?KF$1RcS=s)=4Tje<4ISYm(+J#qBS z*ghxjoQY*+sd*WK8wQ87Q2csT9l-CC~g7thjY3GTNH(>NmIJu01~G>Nvy zLy||5Gi`o3(*a>gWth|!%$vu3{;Ly84$Q0%?0PDIk05zG>6qUQaVJ!owx~rr7aSxu z>F$e_4%-oLX}eJ~YR#Izv>r_tGM?`ns@S{|tiRoBc?-z9>6!4~H$vxZemuU!?_ zmp}(M746;q2$W+4VA+cR3(EMEtwvYL1x%DeMM zP$`!Kci7~H=S9nPg-iOVq5AA^rV43t1<>Lb3Lvv*0GIaGmyxP0*5Q>&>kDjo5lF5K zXS9gZ!fE@U?4L0Fzjr$DabM$2`RtdipXS1qavcUlnM0@%jGp8(B(0I1D8}@=A>_VB zX2#i6VjI%u$;MglfrR2VAgf_#m2!SKr5y2PQ$M~&^vdc`jn!ol36QRD>GU-ufU`PA zQ5V{z*rJkAOrCvtb^YTpK-QDlZ}m&nyTcO8@vY$+FJ%PHmFr5dclzUKT&fgym;hd~ zO~oi=g*AKVWppAI7i}kH3?L2!mJm|6ZtJ~xNGix4o2{}%I;1k}YIA5Z$WXhp7z>E5 zeM26#A!(PqAM<<@woOla!#%vCF{wL4Erf`^jjG1XFlza;X}(pMEy$5d;MH9Z)8(Nk zWmu<0FkvN&>55wBK!nqSMByxQ0jCF$AAl%&f5lsZ<7z)Sue_$Ts?gcxq}vT%V^s#I zwd3ls-ZcoD=cZ~|!w%pKGCsPH5@#Nq|GtP#Ca|D|ZZZsmz-aRo7Or~YpcB>OYn*BH zqsoC3p=qieQuB$|6#uAo|N3o2GRhI(D#A%}b#K$l!Z`}RpWj&K6*E1L5X}||pyejF zwaZAn+v*+(J#ioUGRf7l>VieWeJ{ZAum$jM4{5&15nxfR$i+Ac&JTQk0np(R*%MS!#0;zZ^$>BKpA z?@{PBM`Iz#*&KU61&H1KsfU-@#-FJGzWocM%3_f7!3w0TIo)o{?(Al(+WBSzApItwzI=;r(nnnyV@Wym zMbM^&%o&iY>AkL#&141)EQPSI`Q?*niysuvDiPu5TMDvW;GZ+fph~KL90qZu;bIi0SzwX>1f!bigS=I= z!)Imr2eUU__5kv8V^`B4)UIvp0hz~=YB1bY4Sk%_GoHS^6bRm2a&jV3fSn$37lR#6 zZv-;YV;j?ZiUhDtkj41XS~Ot!QMrED^nI53^8$E;5N&|V2&6X1S=XMw=4WZPO+`-I zz%QT?-8f;U_0vep{sS6{b8t534sF_rNjWYUUIzgcCYIaI*AfJ)vj9P|-9wlR>K%|R?UF|RP=e2XDz?qr3Jx* z-CfcpAt6dgcZ1S3bhm(Sul20wS-bdj0#G`V zE!S=bg@2-M@_@wPX9bwiDU&PR?Uhn2v33)1T|DXVDS1K)Ax@SxT)LmG7oRg=a_p!d zvBV6hsg#B?DPgA=cORbHO)#X%ed&D=1`fd>vJcEV>8dHUCJ@bd26mC=P(30NKvc-3 zg?Jpr`VO%v7NJH}enMUZUtYE|!-8n)Kt}4=VkL2lPEb021>ZA z;b81OFsaO{S5@ep=xXt+WCh#>^+Li(D@^0hpLMrBXG#lr^_v@@c`i;}E631^EvVem=GZ^NQnQJhaBDm`hW!MSW;LPVwy)3gvUZrJSA7eOU zbD85}nD*X84JB~&-NP6OX*7wZP&bLu6$|0imWaY)7&b7Q2Y1A%h7Vv=(GK6}GBq%t zF8)B<>e5z=UtxxWbu7lJ#jB6Ot5u&zTda6+dw zeIL`Z{$wE0`@x7i94QQ79+-klr?KNs+4oz6Czjhf#!s{EsPAWtpv2!m;&rNu2yK;X zk5#^I%e#K6S~GTNl}r~UXZ&cuz`W>tOqE*tEZjVbp4eP#5Kqib#$gT9U`jADG#g)lPl- z8%t>!hre=()3gC`op`IMuk`^inCFa`&B%XvFu@EPa-rU#&$jcu#<&%s$M} zI48RN8)5P-R&v*jFG}*s-11XZc#@^%XUHbSG^xP~Le%tOkPuVRn=REh z_kDSB)M^7njYXHM&DBC~e7^C`K{6V{Y=Nb@5TJ<aapiz@fcw-qt*%lCgw zq~+Fnbf>Ra!QH)cS@hNV$Y?d9PIGjC>RU;rH&W|QJ5FSM9Y2N5u{?hjhThaK{Hgsg z*{0m3ML(uCnD!jmck)%!~wwF8;SzP!c8{qRnrlkZqQ)&Tf0Kd)ll}yf=KrX(Ii4YuGSC&g&oc)@>>So5v;z^9E_*6uK0P@`Z&4 zVfl5|iYokUN(B0=-gFPE9I{2*S`SP~9s)jX)zPtI;wqtyLRO(_nT@d8d36@HWhPBh zlOc*R|MVvrV&l&S32;a|P#zJWXg1&RW1>)LN|#F77L=tItJj}>DWRcB$ z(d(q_xKt=C9~#eqqnu4*m~_}=7z7+3%NQ24Oje=7PniS?c(iTFZM5MQD}Q5or^%ZL>%L6l7$+|MwU`mPs$=z}uFh91qU?wMk|-P*U(G6~2eP{i>^ z$vD%WH?qbOabhh>Fjx>3{1=9pK3^}@UZ@&&uzS&#bR&i22CUFx?FAX*Bw=tMfe4Y7itTZRYa zDobo!n9fh0@~y{hH)7;BWWUl0(uujN^zQ`)%seJ{U#c-HnlF?)>d6dA@bpDBnVQEO z){-7kUE`?jk|TWl#x^5x`(8^Zi~Fd{0bdb!YRP9ST&%r^FX!Lb7d(5xYb3zE`}@@B z`523AEY*R9+@q}}6W6A++R|er?XC{IGf4K1Y1lx@ld0hn6E51Z8p7*x{tOh<>;g^> zN^x4?%9f*(<(lD}r9rIj%8s&|ZAxL}EYynJ_I#^vH62`(BTgf#WY*XbiZaHJKprBh z{8TWiOxNRK-Zx|Rx9m^c#sMlWb5mEbmWp>Bp$wPuz3Bh|P7psL z`l1?-bh-`bxjG_+_ngfFmRhV5FavtJI)IJ*AosrS%7i?1qcBLlcw10r!{mPcj)?Q> z-sLSabl{=a7m@S_6J@nSmN5=_dA9AZrD_=djI27Zjo+weQb{E6Xg6dO$Yhxw6_HUc zs8-tK-Rjs&c;^1x|C344js8VFx#(UIfz`7Ga?Y+*=763A-WLoFC+q!_f?{ivziGcF z?vnITw5jt3n7WF3*f;|BvL?bdjiQu`h4g2zT0_zLY!!7Rs63NC` zHR@fE)}SfDl7JpOfOS|czx~=F56~T^$|Pf)9uL2z%lzc_7)iQ=H?M)Ky<)y$uf~w% zNt1Jt(#S#M(`WUn(tZFE76QCIqW2$nuq(|oYZF3@?Q$z2bMrLVnT`tuGuI0<0oVE` 
zF=no0l}DNTO$~OF;d6G#odW}=uCUT=(x&Ns6*l~oLJUcNW&N)I8mImOrcrKQbI!s{ zRnMg6E*KuY_BoHOEJo!T*isvxTv;K+aak+B(+H@L{#ndUP2x(DlE>phM*|MDGBM5c zk5ZclIGq_Jd!~7^2igwS`@;>fEP*dwJd4En4;LWrURlor*Q4zPfm42FUBEc%@kmIF zX~N(=2W+#4Wk$aAVa2C0s&wsGdmGkBomZK-1?Mv&mxF|+(99?r6z9L81eXweL(x#V zYo;W;##SmguJ%nZXSAH+XEsDPfjPn$>7>D0ayW4l@_uag2c)fWls>NMg`hDVm1Ls| z!!Hfo<6-h_H^TW3#@?%M3TqhS%Z!;n18=G&pYr@2HuD|Z-A)@u9bKffqht1-$&~Hw z=`wGn@5WMbYl#G6%WMv_@2W~ZW*Jb5xmwe6vzH2MP?~Pv*Nat9(SS21zD; zW%yS1lSGqwfF1Jz47t8a?&asaFN(LDLj&(6UVX5f=Rp&jMXh+9wbP_SLY0#mjm3o= z7_~vUv!1|q&M@_`a1t!h*8Ak#ESvLg(z1t}Kqv$Jm94oiYnCAV)~58oEo9F)7wqrj z(AHmA*-pK5+IPHQK(t`Gjz!ZG=5jy`%3ln?af;F*ez$vru5&?o}9)iT(9tt87w*3?oC;WDM@JR-wWy2 z?>sEGGd<0e#lyf>Aq^>M_xffjpPQvwG<=vP9I%A*EUV@6(*O>&rmpJxr(UzKx2!%5 zx9Ifug_vN(|Cj=#67I3!;xL_zm9IK`eW2*^?lc?J*{*>v8WtOw;B52Gy4NU^c>9(n^$_cJU;js?Y6)f8rRFFCPigU7q5ik=V)v4?vzPmY)oAu(tszHpp#(xm z0@c-Jxoz7)krS-ObSqy4D zxIW)-G?Zzam({vZMQ^OocQ{m1VL)f6v`w!!q7)~XFvd90Flaygg z1iM`_%V&BAA$ElSdSd_MI^%c3?Knc^KcaxKz??IbrblM~sr^OC02$^3{nGb{P`GE4 zRs#v;{vUkCS~0emB3Lzy)Yy?Tj*s%Yb@oXnH2GB;^WD`wz7nLLrZFLE%sD;y881FY zD`W*C zGJei=iYNo};;kV3?l9p}O!U5!J|^3nEQT}NUxYr@jD)7UyNNV+`XO^Sfi{k2tb!_+ z_O&{)3dbvntaYVd0-9wZ^wgcx?;A6d-|^aja91UK>$m!~ksZ3JoSnIYlai&mX}VEY z`ndO%c-pgXrC!1MfDIV@2I%ZKF_LDCO8(1_frnZp+O? zS!8VY51PXJQ=0mKqR0f&Xt(D}EIT>J%_&qkJoi|iqVtpZhNlJgeI0d&3(S&jfO8Iq;#t2TQ7xM||W%5cJxgTCpxaHFX;#8wGN zrVQ0H{J5f*?-e=(k5`7sOSTP%XfoKXUn2^gO^;98PM-n>H$=Xb7lW)TE!-UkoN5Mn zz4SZda4B5`z-1OqEqdt7TVp%#0n*>1onuufi+dn)0$Fr^mx-0k6+^a_>P$1?pltY= zqqs*~-}i=~Fjyk>eVqC=wMDBlQ=p#c;=t*rP(E{#$X-+WEa@JxD0B1Kp$NM?L!TpW zqBC#LMiSJ?=prW|dME`f_1g8N8O9hyv7yM&ZX|xmlk}KN&ZAdr-t(H;QK%hQEn{s}#O4Ub-2R+8QyU zXJ27LRF{T67f(4x*jwFuy01*%duwE|kSD$KcuU7SwQ&>-ZPC|P;B4{@ujv=jlQwCu zAQj}3xo!avp>T-4T~r#UyvH`C&-Ke#2vAfRn(G;NaVADYreVGZ&~*vDBFu;xsX-{% z6Br7=LNWEMSiO=$!xpj{PctNT1QZuV9y>a0H@UFg$W4~_8o%t%?o3n%+TcoN1=?G7 z9I!Q(b4dLgG$kdM-$yzD!>Xvae)fI2EE{$extAE-buWMhNmnDzH0Z7SFfL<$wV5L=cIckV&V7gy z=2*AJVz&eLb4tr0BnVb&*XL-o7+<`BU#V~%Ilt|Bt3-kvX#T@Ii8Ud{=WZ?vQp`B0 zn4nwTU#DWd&S!0V@-GAU9}d(%XTr|}F}~p~XcqAU*^zFjHb|h#4a3*QG7%5LLq}B% zt}5f5w!Cih=beEloL-=|fhJXDrOkim2sgF*W^ zV2%qR#VD-lQ6cui=nd(oR>N!g5^hE`f`(Z_5W=0`Y75KVPUg~xzPj)ms!AaAdfBQv zKHVK&(%%;z3`E1C4ZDn{^8=0tKuFahs+)ig@ePymNDKSUWwMzLVPjs&*~Tq*8a3$_ zN#Hf9Jbc-r*PHnb4GQX4g{=HiOg{!j+U61IOV1ncjoLsZ2SBgBH^RJZ6@fWXnj$FI z<|>=ip6SuwvB;gJ`Z0PZqlbL>N`lq%YJvC5&0a@uRKGwFR@p6z#QxD`+@tEVIM||3 zLXG#}!yy7;iGWWl2-<#}Mv2oPcIVA{?IyIS>)W;hJ&VXXR-CU$&>fhu{wpJoUjqj+ z*oM0AK^cNakZ4>4iZG2Dd_P*Q93N;pQ_k(T{y0^~v z)Yv8ATL1@W09%PUZGP)6ly1gQGIg^Nd+n`p@wR-Jb1{eDqn+;O2Csi~B^mpbI6@_U z0Nwpi!izic+T6E)aknv6V$eY^&067`GLuH)Q%Y9u7YxL$wyTnsh}% zq($Z*(*Ws=BleBLO0OFY;3Opkg-uvq$)pI8=(>xDjQf_SICU~45>&rqD-wz0fX4K; z_EH&j8cW-S1$b5D!LGG3) znIqZsZcliu7kDJBK}M3YPwm#zxM;a!^=4x!A9~c$41Dj_SzIBRDgJOChLI$p9a=(1?b zH2@wkzA-}=q1llJ!jC31Oeh~^lZvop#j>0ajcc>~%>tb%1u(z6CSqO9=xrUlX*VXv zYZaTdx|7O`hcgekQj8@HWRK6EigQGxL(Gb@v>U|uD05Bx2T7rFmdrYIB&rz;-SkLR zdi&V0m*b@~7Nw)>G7|xAS&hv=8%>%lY^M}+%dhc)b5Gx$AqE-EN~v6InP&K;%^S(=F*01X~d^shDL#D~^U z3N-H(Rwg1jmn(VlG%JM$+t9)Dt26;w6 zL-(+B6LH8&vsUnGKAg99;Qp!(kFlr*2%2tvdyDniNoX2e(q4#a{juT99Y z)lKuu*G7DfnwQWXb&ylcP!Xw@#-hZKvtwKqns^b@o!6l*R%(?U&I8&{$dIIDei$KU zl`b?7Y)b(!^6@x?sIeI%>B4=m=Pr3!>r^(l@7C-q4L2JYsJ%&G^h}3 z{bqzn^!QbqF?&@vZ+DC4CEqb@S# zB_%$i&bFn}#dIg_$mQnSiRHzpLmx!^oWh`dRsrRHK(Z$bGihq$8CM*YFQU%U4n?G( zp5Qq&tjd1xa|tO9`16q{KzU(uIzIZ*xv4OJ_1Xv$+8xn~uB)8CXjh+H2cc!?d5i) zunuXN8b>3o-XVO>OIW;aa0GmGR8K~4Jot&<2ZqA!sd5pUQLoT|QU)xgvQX7b@9Xqr zQ5=+6xqqoXs}BMx_;^ehUINCDgb3FPJk+b*yQUg`ClJU{qmhR>4uHy^#1Z2P?0$kd z_v%WoCAd(fGpyO2ZNbHbX8gS?FiffXZ%tc-Z)2YOC{yvn9ygPj_)b 
z8RKu&_l_OPq1()rZxjKV=`TX+|5S@s#GwPqjQlXYs&W~q5d5BRMnl0h+l;Zxb{%M9 zDySafg9JP?8AV9ayOWJ?$vSSWq`#bcsjDVgCm{S{c(c3@PWUmQ$HTSdPd*KA@kga4 zYlK%p59EgcN{13xxWYfgaEU^@)O()|5U!u8a1za0g$qYN(vknQD*h|^7iImIR#An* z=w!pMay}_yw!b(>D`)>hs zF;CDkG;}>7+KcU%eFpl}EYF{1XD0UCrG?u=f_~+Al>m8UzGVvA{pXKg{*Nm;-8x_T zjxzn1lXZYEM<_~4mY{zC3V;H3kybmvK-l@YT!PE%ia7^nD*?0sa(J@<5|=hAqCT&( z&Nxa<4!qx@;LYnlRuhw*W?uuLxAOcTe1~R%wb?rdCQE$TLVh{`)ZfVGBWDH7IDDf2|MZwZaun z;D{Sx2VSqu?Nu_$00=8tl9qz+Iuh9>7W6+x&Kb>1m5wSs)MDC|16o; zw4k(6HYmVTu+W__C!0Bwfe%Xk?4D)D!lpHepSV?HAd5_8ACln9@}Z96C|^R>c55Jp z7EBs=Ko&gAZ(wko!iq!i*!1Vt>1^>sF1VrUJG3kF3f_}au;EM-!^B-6ee;8{D} z+v?)Y6b_EjdvVEp1%8AwYDpI2@}Ow#pI`+)9?k=D`V54{9aRqCpw%7n;r-fYR7Uk{ z{r~qC@%ia|)BdypedU!>C%8=7oUK|LT?ltWCuo+o!+SP6_9sLSyI}rI*Ruy~HmgO@ ztqfK90)_97a4!u^Nl;hp&u8#aMBVN!^MtJ1ooaOklbDby_BfbQ>CaljJOylD`08Y# z6M+ylHhfZ#foCrO1jHlZ-v6y>>1DFkQqFUPx4I%R(sIRTWRD-TEwTfm+r=@0ZJiFT z<`)Irz-R{O0-(i~AjqHTaIs*%gCBk^iB&Az%=V;@gF!p`PQlUa`BzIh2|?J#F3be@ zg96ylvCMZ;<3-Y!lV!%K+!1TsV;kNG>+jGF5Yl$U!vt(Vc7dD~APMLP; zR>`>r+98B(hwc8s>pl?DkG$Ica5dwa&YKu}i?VAp?_n18g{c7Z*z%;#>~r2DJG0i< z>TTV&#Z?)Cd7>?q#Rp(&QfD+;abba;(reSK;!wIcgG)S%EY?u>oLvJ)%a*m6t@ddGpJX*fqgBiAQX49t+5f9e`ll_o2GW0QtcoqDwWABt6=)+Y z3bZgw33eS4n_sP!y|Y~N$_r>GzKlBm(mkcab$;IZr`cb;G*J{AxZ*lQ2h%kTvb^;l z1R!S4O_q#9#)nx-f329hd1(Go56MHeSy7;d6mxF-QS&e1)5UEA`>Yoy(N%V`utU8C z!{v?fnFV0U7OaKYmq?N0@;DuIzxYy$u^aTAj3wF=C_-NPPs5JHW!<{sHi%$QDoDLH zi}`(9HXKOL1|_gS<-o780nP2<2@R&jv3{2}|EY%B9*y*e!~( zA%t)HIZp-L>E*MDtLpXt^W#82B>*j^2m}KI_pxB_~7BZgtf>vRy)fXj830L2+NhkQ$`(nGUdC^zPh9- zuY4dZ#_Y2ogxOmg=huL;@wq_uar^t}t_bJw?5j}(u)pSkMj6_Egd%_xPLca+`WI5vT?lOmx%nvN8_52+ve_i9iLl zn8I3>HFo=2CC8Dhdv~_`#5I)uJlJ538{{7@00_x=t`*vyQ`GlMm=A3fV7LSWOqLW6 z&KB1B_|q@wD`$)Y+omoCqk7_P4k++(2&pNlrU8QZ31pgo9{~D6VO0$%a5gwBsZvtD z^A(T8v7#=y#s*qV_`;*YHg55l&bjX;w3_br2dH8Q$;7!-6pbSI7(0iHCY^cN9q2WXqdrwdkxk4)2^u8hiNFQ4(b{;M_bU0+&$f=LTCly zZaELy%A<>)f@$cg%dG=&$vw~jMitK60S%xkC@k{~q%!!Sm4*Vi>BwLn5MU9``sexi{B211NpZ@140Q_PDBe%1M z@_f+f;6;BTB7a%a|MfQ5qb^OePj{(*y`ux57NGJ*lzwIT{kwQPP78}ABh)Jd%aSJa zcgZm4GO(KnT|$pR(iv}$n;2=Rdb2-v3E=YA<2nGT z%4#U9^Kl)QP|7Ra*)|7l6J#t%LmoqvXmcqN*)v)t|I5t6pRND7Md)`u-4=$;?g*98?x8}ErDeha(z{YHfaljh@L4@yQs4h zr!YJS>wo`9v{i?k>(RX=*xg|vHp#z0Vpx_}$iCFzjoP*z7j9X7zp0aG3U+^992ArU zxKWWiK9Y#~)EEET)in*SBz}b4>}1dFyLsUw$*>;}5gmCTx=-gj0#=6FI9#s_Xc$+@tH{GT+)QrpXQS zeq@njs#EPb*|u+uuoFpon@G&PK+Fg3WF8q8_a46U?Ujl$c?SrNL2;QGmRKOzDGgol zg%lT`?qY9%4S7v^jo5=5&6f1}g$Z$cN0G;QV76F3hm$qy(x*`s+9AX6DLBvcl7=?k z)-{Z&x7|I)h!ortU83Yv+v5A07w9FWc+y2NWH*}^+WJyV3B!n(JS1h~3o0fmi~;lF zyyp0JUo?REdiJ%minBBs9%GGBMYy2?ysvN61r=&9^wc!$nA z#i>W$#8Sz~Eu!pY>UT%MhdE^@#oc12N@w4fLuP}x7$ggGP9=j0>^aON+6FJKFPMb` z&e=LR(zPQIXSTfVDx&}rk6o~yq3hay5{sRpWd=i#luj)inpXW|u(-(SUT+&HT3IG- z`YDxXw!TxU*Syr_Zpd59y4jo%u@~bG31pV4$vn>xbl0bY8Fu`f+n`vyI4+OF zSq4c!0lPO-27!({(bu++Ev+Gvy_Nmw}m%LXCkZ~H8P{Dh~k@+45{e7fqt zb1%LbxpEP~HDV*d~JU#c;3+s|6 z*D9{df0Rb->B0is31e0X)OxL5a$*?x(+lz1JU@-d^mEm0ejM3TZo}C1uVpAa^p_bv z?0aR->#5cXKi<~8Q)SfYcY8i3^@3QG#>HCAl+n7B(qpyKQUO_o9n_7A z2>i0R-2{JE>lutitD-PJ?3B$oesMF4-9v7bmKu>|XQ}GWWhfnjD87-F0`Tc{l;;yn z2!UZKnOQJ;m~WD3|1pEsKX~5irI;ux-bFwu>F6)9OQ{ zdi#UK#YOf{=l|#bNz&%63@SaWv8l3m)p~flC6=`B6>{W4Pm*AON?fLPXhU8!J7rt) zFp&y(tn>eD<-h$wlH8n=@TC&%DoMn;3bO#FOiBVUiOq8cz#AiB@VsPu!gv`_3)RIx zIq3HyxSluWfMmCrvl0qm-v9B%|JJWx|2OzEN;};e zm~93|?(?8Xu>7UU@xKNV?UN|Jk0eZ(IYH zhJ^WOMi8q~Zxgtn^swsyf<)hSu~3U4wj(*7oUVGX-{OCQdJN&+=6@vwCO_2G3kD>& z@7q8s_Grdr)eu%zTBmj{4Yasu__QLMgdd_@97@zgzDYZq?C?{iMVc+8Nl}ELx6H7S z7|ip#M!;LoH({R*HPB|{jzxheXiHe*^~I?X!n`I)>;Ydx9F6_=KK?U*&{td}=AgX0 z;qkVyB`|&3%>=SX!dd)&uaG6RBGbM-ggFdQi(E}fGL%K4qfLo2MudAW4$*&XVMzok 
z@||$%(S;V>l~m-leC|o1tLu$L%k117H`6uOWwnlD;JyWBZ|54QniY1S*@vaD^G6#qASV=N?Fr{HD;mW0;TvlSlP%Mt>JUBCn-LXt6N<^l^=#@_U*udeg3}}^Pug3(ZjoQzwzG* zM0wrw@q6MtWimD#gBQ(r$--4_QElK}R@cp|E8n$DyF=5~Pw#(C*YW3%=oDSUxe6qS zLuXO+{UeGVRoDKq^FBYQL|UTV51E+x^?_JFg$OV(fEm*aRNYI~(_$T}I z>ykm%(oiZ`66+u26?2OSdasSGj(px1ff6PQ)0w(rUFIgsTq~ZzB?=554R1Ve znJ2yOYFLx-!)>b-+TYggsTcC$Z;>Rs(@#SW3? z=d0OJ>WqsLwTm;{O5aFaQRp8194To*Rf)*rNz!P~N^tFXTbOQt!f=IrGx{RoMa-Ia z%SC(RLD!uXA~4==zSZ51?vHWm)bNf^{Qmq^8%tF}-$alaFI7fF=hF;Zg@k|jU%0ln zE~nlOtfTmi!En+VYSrI=F@+8Z_gqB(XE^@XCO2J+40_OqgdK((vY@9H*{9lGmOR@1 zq-e2{c~W2hYQry=8F@XPmYhi5^uAN~Id4~DOuq~M8ib;F@{rqcNBc~^<5?OahG7`Q`iVfa+cN;v#M;j38y9V3w zG?M?L-Xq7yqVOZop>DbWl)XFfK-Y>pAk2i7$>bt;>j@t_? zX?n>#ted!^u40dsdQ9|rCX#z}HsD`P7v?gFMIb4Zw62Tsk-F*VnzsH&t|c+gbOF61 zKu%gDBG6e4yCfI~d;nz4g(smq3oEgE%-A*&fGPgBnF`b}35V6pRPa>1}J+?8vvnGhYe zc&Q>nOXxu^8cO&wZ2rK=q$a2pd@WCChV2E1na}|R0-K-Nx$aX}4jqD#WWgt|AxYPf zOGRI@?MtQY|4tA1UKWk`9sTXU?I8%?bP9}pV2w5=r2L-z_j6?Zb*zfP1DiV+vwi~C z{A5sOJwRfU|A~|pTvk7F+-B(2J@PTtL9;KtQGZ^Te$(0N*A3i>JFrsp)5qKF(Ce=8 zrzni;ZeMP;2({G%=SVnh5_~C!h(r#T-cz~3SsN~;Dx3d&Ne{T*WqAO7;VNi8Et5b$)uajDc+g{ghcuplQSI_KH2PsY3!JJQbZEEy= z(@2Z957|(bJnEo077F)+BEB}Pr6_qh2i7Riu#;6(cwB?{uNrti8F%y@s&XH+W< z9#*q2_U=`q>cv_3sdBi0f|%$y@<%i$KSy{2kYqTgDqeK0ZTxXV4j3NX z@~mUdR&3_OvB(KKTDorn>e=1`;_oqnLLenF*kB|u;esIoMvc2FGS3Pa|I6aio0JDr zga$z%yxkwEEH6Qd`VM&U#xe(#O@Z%_C{z8&_UCR#T2ku%YFK(=wjxtj%XB>V zb@YgdV)SXhEDt_=)%qqW>-5y@MlNoDPd_pWukYJrmb0xg+bZlV=Rm$_i(ge~S8ZZU zfoJu!4FoD`aVXv&B#xz78n3Nm*gShofij`_diyMXsM+FUf#8)`@#8pDeNVXmGXA|) zhXKQzj<4i?nyH#vTGLPGK+lNPeOY7odKwq$>F4Y&PDgh{Qf2p_WGCUSqxxf&$tL=w0kmH8!6G zir!i%GtA9unwIGIMLSu6b0#=b+;3DJ&1fI@wF>qIac33!VcL#)O01`C&llCn>go$F z)~4tNd_otAw&g8w{mrc#_k~97gHNv!K4Cj{k}vzevdFG~Zai!rT&Y?NeGI)SbJF)s zeJwamtP~Kury}AzmbvG%2K`-B1nIF*OW1chKc22>LNj4TuiiSvJH5yoS1*eCcTiuJ zu)P{aebr`nLNqKKeP0S$?#Mnkh5Y!uil5N6)QhO7&a3g6+hj(8x@3h2?Er-r%Y@E+fdn50-o9F`1$cDI(AaCL3&=$t zeMn+GkU@Zx?@XHt=81GRK5+)F08FAjkqOZ8?2)`Gd#yVw>8-#-`&tf_RWh5WQT67O5N8MfBLtMl_5>FEtUEb~@ZwEekX99NIx=;>+uZQ-s~}+HQx#Dfdfz zQ(4TD;?vi|`0GjPDhUzMNuRT<%48cXlW(sTuwSTHJPtyn&&zSwX)@p3OPK3E^UX%F zD9pgaHpz_iNyxt?cOJG8z`#nK!CHD!tR9;C*}G#l#w#Oip{Z-JC?-vpQE?l!_9}kM z4Wkyxz1EFTOh3`7_WoiRXbxQg`B^Q5$`C@*=;pk1r(lb4W}D-HcJOx2)FOPV1N)uP z8KUTk<}qqJY|9|NA^hfkR2{98Pke7Uh|po~Vtr98-ne?VdK)v+kVEhFq3OMo&^4Cx z6J==AV*O|a=2SNVM2!rPY5U)l{=zv(@SQ{8x&`JpbvRGp-h9TTnI!o*aR;qJZ_o9r zG#@Qytn&JVdFB1mMc{*dPsAPZ^D^9|RR{7iT=gi&u*L`GDe?RUGBKo$MfvGH0clsy z%SwLl2;yHgkCkM+&u{i*l%oXGgv(lyP~B@sWbUS49gKf^zi%01yl0=;w;CEt+5cUL zVe=jZoi6JX7zMx0+#mPyaH!d+{2r>Xz(d!94eaHrmBLbRU4~y8j`hvv&ArdG0B=EY zKy@K8uNY?Hk)>Pjs)mpjPk>zc+)})g2Ugn-t<_bPGs9WR2nvjA8KJGg?x*>Efn7?5xz9$y5k+WCioovNncm?Q1hs^_wtBg0w%5a zeD0HL4?A9_y{VJb%&Ppaf;X4_j*fTy40TNcq$hX%v)0afz%+p!BQxxKPoAggys+cX z(s-{Oy?2+ZDk1bK=?%E)dBe2kCwb`Kzg%@Tcl4~-okV#@qQqdwJ)iL>4}-+D%6>0M zt2*cOjQbNf-IqJ@N|uf_mDd_YetJ-9-@e47dc03x5rU4XboF zvmlKxxFFTl_pbO-MICM&jKYF1k6O}td*ka4jUqa(HNTiz-1H*<&B|2;w;ZJf#|@WuanadD-+?KpfqU+{3A8;s_(FnMvL-us;`gOT8Z#97A&WF z4)3QGoY28RR=0L(?Ght$kpXr_S+Zi@EmtFM^X)3c{)}KQdJ`Bq}aqT05a0n*;%2ICeiSgD}itl2=KbotdqBfhK4?=;>juD z=@V;1&B6j|#kIBQ9~Q2J(5R|pnoh79{uX~0Gof)XQzT0#`3&(^2zQefR`XXmWMKWU z|6ccwl!6|koJz6qf$cNp436P^BBo%#H-P;bYac>yQt!blu0-Z2(!BB&&d<5(P7bYn zM7c*=LF#tu!ZYQiw-+bLOYe=sRp}lrq<*(@C3q1vqHcGH$>kpP?^q*_oVj7t4l15{ z4siPsedF8Yu_f3Tma#Aj`Vk1u={F2ew>bZxr~X4DQq#6JTvg}=9~^)RdT{4u#^UzK zd2p|rKCJlCR-w0h-U3Wi64p)=61j+u{L8akt+JZI{*rr!<}pGU?P%5yrLCIp>!t0T z=9#&wFqjD$uB3$}<_|GT#;6pD2RQI8+8chWR#~o&hSdS0f#M6b%2kZ)4~4(&Z~e%{ zW>NvjLXMuP42~%s*(QVatnTU8i3F`pc#VYop#ysf2tiZ5hqpg}52ofVKBEGRhjO|Y 
zU_mD_o*nw84t`Pu%icNLx%L|amdf3VyuWO+tPnE4!YV?;E5k&qU3dnHeWMM&crT@I zDnIH8yV$^WZK~2!54Ec?4TUK_lhTQ2V-(5>H!)J&+hqxwhC@)xeT{oGVL8Aww@^ZVPWFcjke8Gt-W1Wzhqh+lm5j6xa9F2EJT{?!(x<+hrC* ze=C8QHU2eY|vfU}H5=_Ro*efxHz622E3kA5Gw54O$WZ z;R38hT)V;O&$^+{R2e!_BtE+lHECt{$!UI3XVR!|Jjp`FmW7|Jx_NrWrHq8wK@UW% zwBDQKjEAk{ygB)y`|42U%oZvvsl#VhJ|YR|h=~}uUeVlMdGp|{)>;^fUy>L^zGR>M zoqMw{mbK^A+i5;oU#Nm6+`2xo8TV;#)d!2*x3h_5ch?;~OgM`{Z2I}|ZX~{~5`MR~ z8DdIN{8`JM523C~mlk7)YrW@P7LX|>8M9eZ`CQ45H@TWa~K45xun)QD6Ztq(~Ftc%}k*cg80I?;fu{i7Cj$a?w#&OVq2U z161oH3dO1L9p)#kfRh6Y%m!LlwKet{DC z`L9qOpdU~4CKQ6!uxvIxgzI-rWS2AipM{|a*Y3Xy?TPXSi^{%GSr$70xWZ{r#=BY^ zM%SLt&XbKk7Xbb8L+j0(eKS(7dJ&1a2B*&?ff-!3AD@*-zlyxlN^2gr3F=WpcQyKp zRm#*4HuFxx?`;o+7ZUZ@_;1NTs&I%L;C_aL64*mk*XlmMH30h=5~oXrx>wVL{ zLaau!Z}m>Gjn#`JJj+G*gdUat9LY9eOA2t|t~s3iUFEPR=&iE#!8=Z4|8^+TFU05r z?zd)$c*cpj)je&g!*Y^@1$HaPaGT5XcOI)^%#L&=9jjMkjccX2ab~=#iTtSprs^8| zYipv-j;*^;7sDubyn{@~N*Xzus{fMML=Kq;C z|CQl8cZ#B>j3|IVGp++6<{FS|>|AhR%U&W=kFS93H z-VIkheP`ACs(_|j)@PA}h?FYsQ2Z7668lGluCX|V5k}rgjOGm>h>Ei5*8@C&1DL-u zCy~gL4QV$5&;(Q}cU~pf6Rh1DDK=48l1eYcv61S(~KU*LBH@-4K9n%)uNMCcH!~*g@6jq zQ35^jzKEJB8xg6_zd=%|1>#n+8&gkCh_1sBT>J(6Wlu2ULS{gA3&vc8trs#t5AW+= z6g*LMF>F31<6$Dp6HwZCYhaN66)(oeq><9I|vj+N}vnwR#&@D&*wrNNv^OQ_7R9b8E zOi=CMT`7{81AWg{TA5Lbp!M4rf?JeXYL7a$xlXo|qR#eP@2f(`NF-lYwZB;4vdvlN zEeq2(>5G_^+x_ve`OQs%X^?vOU8U#1g9pUkP7h-oP_P;Vw|qer&jHj z&P>WHs}l6zbI_+bp2xG)F430WkFMhH{k?>3LftOC^Jut_*)K$bHuMHePlwUgp$(h~!3w8iLF z_bhf0Vz+ENr2F_UU!20I2i6+fCr(E2wT+|$@!ZC$s*>D`@``)Hd_UK1T6qMesM_v^ zKXUsN`EW?zG5TX+fo-4QC{tN6!nWDhXCz!UVk+}PfoM{l|Ix3S>oj_$JMoVp~$_!+a|UpD(3SMgwn3J8l%6 zG%A+LV|DL-zOKjH`%gLEnv3^v?6CR$&cJES_ccU#T4=#Hx8m{syJ1g_1@klLRmS3j z$JJJlERo{F-Or4t)2!t;k- zXM8kfOkMenhN9nPQJIQ8@Bo_T#C49od-aS~ygmWyxn)T+sOx)7UW1ZM`c+z~EZO)_A{AhOe)HtD4&~q~Uxl*ZJ$V74@rrBd< zW2Xv!lGEHf+2Fu%z1q#(L!Y3^5pi7<5ks{Q(%XZ?X=bY(PaiF;OCk7xSP>~K$BF0u zgvz*!A{~=t`Mc_)7xr8ot-GX<_0e#S9dyM-8-D+@>XU!K6=BL&JR44ewmaAELh~|Y zY~fSr^rtt}uu~(2>*KUWw(5Uyn}30kk6tXBeu?r?#jZAn>~*wB={S^lW0tiW@cMn1 zuFBBAvvrd0M_aFU=EPxTRjtY@bfvdn5C#uOMv50QuL&+U2pcYc8Vmyrlk7-u6^}ni z3XOaw#>ztax$MKQB(j&AL5!)9R)xvqzwY4TtO@Mxk1v7M*d|ifSs(A%KJEzRdhao*xe-M%@us!pN!~4Vn$$t@Z5B8ETg&2KsabEaP|lyQ{iGV}J&ekYe5n55 zB}1dWY|cl1_DBrEfZ(Y3$Gk)E-k!AXXy|jVJt`6$ar^k4W%}Qwc?ow(IJD(8EeW10 z_7b|?Av0!NRdTFg6IA+|F|Q|?OL9M`l^evQV!EINn%OYwmvR~fFc1X|WHf_*0W3R( z@6RV?_lrKo$YJjsOez8!0h0pit>PNV&0%$e(dviAa-0oja^-ycMzxS;uuZs^TA)iX=TAv2m^S)l!|9ts6|*y|8`V*rs=C#Hpn7ZcoxH{A&X<9} z7hQ|xoWX;TJUVhZf~Nv0nDL~v1wJnOj6PKttxBE~s9$caaK2wl>n(tntbWN6g*{r} zduw96L0!al-g5e*`qeWsIKZWbuYQK~J=KT@0f(%q2g0*hIE$)nBsZ~WI(^FQSKU*a zDGJ4l>^8Ema_z3l$@9lmTvi4V;&6&)(r*{7Yit+!BAs<-8K#Q1CBUkz@xylcgWA_8?M+)pkvv<6rG|D(mu)T;yVWCE zX}#D@uDChxCV}Vka!Tk*TiTmIimhJly)j_VVPlRs09Ln8slG5Ca!4jmI%ql$wwGPK z=oOfGjJU(wLQKY9QeejFv;S2$EykzNG{i(V<-s~XtCB?8S|&JgiETE5PQHEW(u@1= zoQJM`V2=f6J0XHUGb`(w#c8VLSbFg?wxPF}Q$|1srF5zyhwn?`UZ{9h8T~(9eS|pW z?g;8@RoIo&%AgY-H2=WIjxU)*;qFBoR2X$XJkO8-ZVmT=vviSe4q}IC6mi<7M%N9v z-*O#ve#l_qTa>J9LQXIBq1Zzw1Acyd6Ns`sZ<_Y^Yo{MbomVRj*t&7rfHrs3m*JTX zZG*#LDYzPq5lsPu3G4;UQ<{gUhcLs-hxl9;<1`ncx3R8g+b~cc#ek|?%ws$0(t#bQ zGK!%(4V*giOV6U;Ogirk32lsbHd6eNaooFdt%|tvrMA-jN zMyv+z$L)J4lWo1$S=x}5=oFpj9UmpcQ?(=M=npy!m5oc#50yvXGxsTsPh8S|{V^5` z5Fti~zm0s( z(aiToMq>mtSCrNG>K#(Tl1^^Ny7I6==9xsl)1GOc?8z{uaKv$qcJ6~bTr@m!8O5;X zT(ArGz5^NL*b)zrT1s8z_b}k<5jxrm@W{D+6!I)*TI6=nGV6+ReU)^YLetcAAH|@g zAm>cMEw@N0x^tN`k2> zvMS-nt3Ie%nX((=*x4LQ$Zm_-W`Mj2@4N4WG{?}T53YXO1MQ!bLPT@=wtLyCDx7GS z&PEzmo695;0w})}ekuFk%>4@Jw%nmb5ehW=oc#Y!>C_XQMG1KM%8)$^kaYvX!F8mk z;mUX|_{WW;V|wya8Tnd5*uk!q4KoA9UqYr-!CI#*PMM7b?A&SrHY 
zz5^o^Xag!h!vuzJN7{zW?JoIpMxlsvSh_YzH}vM40uU6o5I+6yESm0JeVLi|PA6$yP& z{{cmd(!Xwc*u=N0<*YB~6-!+8Le`GWd8yM+jr{zd6rZt}2HaQC?`JVJBX$|84fP*T zR|$ob=lyu*SK{YK{3I()Qb4v`l9k_zUvwHi6}4C@oe9*M#X^U>o0A5@(A|$k)NPO? zI)?Rn6ublfP!=+^f_VT99TVLI5{iF z&cB8beW&xU6F*-_phU~#rr{C5uiVYUTptI+Q#q96D$%t?8d0K(A>DUJb4dw}bs2z) z`aJjv#_+&eq?u&01gONH;eP7n{v$Mb;R_U8l^g_n;a_4-(3W_Q^J9~W(1XO$t$Ev` z0vq=wq7xIv*Utk452{%?scA}gi9D8@)L(^Ywtp*$JX9r(PWw=OeL0v~-X_!Tg{H@> z0sTOD;3=x+?O}fS%=kCQh0v2YqJ$gijA7Cx_HHUAplyMJ0gHj=SVj}@D1)?#{}z9g zeKMJ19>I%vUA)Zu}g2oGd~zt`n)pDJs@aHCkD=Ms}HSMXCP{t%g9l45F_6MVLH8K$b(k(?kzr6K$~ zLI7m!he}tSnKD^pkTkByBe{25|CXeYJ+I|2u|a8^yB^0=AtBayA+CGTw32VV!BC7+ z+Vg7;{ZjvA*;JD;5X0e$(s_;vjW#OC(NL>ygJV=7B**kFdR*hj#icb~*a^Kk;I!f; z$v|k#*9sHzfBq-?_;utHLCEg(qO z0H-w=`b_@;C&fuQo!p`yD-)I~PB&8)hGH$K4f_b(JKOdok<^!y4bgtEv&g$+YE=B{ zUA7`(2PbHmFM$&F3xJdMKfgkj_f)P(vb=4yyY%_nFDU-f^OO8SM=via2cR0hh@w0o zfTYTQVmSILz4i?}1zzAWsJ18+MOs}>6q)1OXax4n3)epDLn5l;B&_LiF?B_z5PFb9 zm$LhfQ=`_!hKa{X`m4BIbpJX0G3{*s?TBg}IKy;mA8(m_dVC?32WRbKYiAiCIgcV1!N)Wa*cKRtK5PcayA z72oI)%$J3)1SVN_Nnoq1!lro6u)rY;Rs}oT$_jNDpX&MV(-Ib|2IU+g7sLKkP`DH(GEdoa)-spthgLQJGm+lxR2Q6x7$rSVoeB)P{#h{n8^!q&#jSRH zi2I#l)h@O1HTTV`+@{l2^N)iIiu#woyX043K+qr7O;kxoa4_LMpdcLJsi)E@nwQBB zcsZ{Eumt497%!I;+=|TVw9eySX3X2sEPV8f{E=E-gkTmRjCT@Y2{0kl8>D^RX|XJQ zpK^<`--xjxOqz@o=IK4payk$7npgUZe&st-?zZ&lq5syT4BL%}IKi9uy?9a%n$^=B zy6#?M#7K7u3I|Vdg)W_tTZ`(d3-Yvy6J>v~i+aC!D#yc|^e%40TruX@d}iV3&*Ow< zdxA(q-%c<98t(ro#KrZNgUbHbCu0^FfBz}{e$fWk6PQunqkOR3(?;l5@{uaPu zvY^lb$6Nf+qYE071*0H-1ZC z$Fx>VZZzq_%qAeXP|)M^9DxJtVA~1roQBDV<;28CHYPe+(kJP3;wpYCT^GHdOi{YO z{v7Rq&zileU*O+V<1!Q^{d!d-uS<0c{H7|a%PPcdgZ5w#6Xb;vN{6qfPy16q{fn!s z#@#iZxo^tK&o{f0AV7_@KKI@*R@=ziK`z~4O`1o)BwDnrAkxUF-4_Ui z-A!#4*c-Z-TEqmOXbF0_iS9FI^am^@=RlTZmJn6Yv;Bh7((Ti;kno3L8`B;mu@y?s zz=r{twZqkk_Yqr^GVcwh{|WwskQdA6%--=;LuY$FlyVncSp~YAY)V(hbZbFzhe60| zB&KaYTu#td0WCGiVy%unn` z5w6@_yBD3_=J{>wz4%PcXL3G@S1FiAK%uvhf-w%&7em?YW7r`1UhKE~; zw=VC;nE&zM%rn4HHleJG*aH>1OiLv34Vy@1>@V@AqkF}4hQO%2e z2&ti`?UNnkHY7Pd4m|HMTs$ab;9Q?iXb6uoyU`krT1)un3qP1@u zCsYfWhJ43fp^ORlxlbxRn0DXHZO(4Ky@!6t;n+%wK`XJ==%%;~~*9#^)L z?=t*FZM6X^7Gt zbI)Jj#Cb01%cT3?!G2$DQ>!tbE-3+@54FnW=1K!YPWL#kNGTAhZp>M-<+0}iv93d? 
z0m-~;$Ldp!J^`ASc?Q7sPH9 zZfT+bXM-3(J;BTx5i!v_f+0NzI%0$|K$ved-HLaz^h9U*aXi&Lv9%$H#g0VgD;HUl zyT`5Qwmq|7MwV3s#!(1B1W)A;_vU(4Ktv61;78!yezFfA50W@ore#3Rr}F~BoQ5_^ z`0pGvZd(OKGWn(r6ih@lZ6$Qm9lDew>a~TiQ3DQ&JtoOdPUxhk0F^NTPJ>E7TmV}E zwX^RZwx2>NxsjWm#Bi!qV5*v<`h$zG;mCQkJ}D$Xrj&$su7Nl0r$uS6!^(qso-BsO79tsn;kuxo@MA75|g<;%*N4G%faHfUpC>HtSvSsP*0=ga5wYFW zDtBkTJ$=njld;{8*N96w(hFSR72<76_(~dQs{3Y=ET=csUQ_U?i0~itzFS&#E($Ks zMm+mYQurS*+`H#yxHAY9V{C476+FiV@Cx5fce$x|FzW~o;Q>yyp zuFuNGrmY9F|9-#r;kr8X%dfVv6nDDmgs2^N-QEAQ%{n0UaQ^)+ZS^us4fn%O=a`FD zuH(2y4c8PAM+(JWpBdTXYp*%k<>XD3nMrt@3QY!7YPf9?xuy$m8zvEd_p(~Ay{E>}xh6B$H#{A1kB^??N<-jZ;JaV95&b`cCWiIotP69&=#5HEsA~=Uaa&)| z!~EaoJKuKtr<22b5#4`ODuv z;oq21FF=xM@)|~+NG3T}-Fbb>)fnlXyHE37F-_undk^o@zZs+V9fCPFiLlksMdWZJ z6;qCXK>CNC%>*&FWYk(yjp?2~9}Vi*ge{#-#)NM*(Db7&HioB)UHuLe(&Ps+vkdRu z{mnfQKjOUS<32(nu)*_=0U-8h>+#NP1&eOEptoOn;ZlzYnR$`N6U^rJJI@slS39Ad zd$;IP*dB^LhN+acSX2-(VUxi%K*<^wqI<^WpD(a&B7SjUT?ZV5H`Z9 z1E>$%8{wY*{3Z;CA(f40yO%H^8SVbN=W|c{R-lriS=<#Yp5X8bf2pY}Yta{C+cW$9 zK3}88DeZ5LiBTb?Wi>z*S`wwF$xKZ>3kR&H_gd+s;RO}7$Kcx1yX9yN1oSHjv0CV3 zQHi~Pq?9&(O;$Vfrxpjq*w$s#x#;GnH(2EvwJV-S$>bZZ5_grQ>SYRm)5-gH>IdJ^ z^zDZB$^LlfehrWGH_)mBxbKf4BfgwIZvtW#S3Il3eCZC#e4A@R?9i6R-8Gd6AouBxBB5)!* z!D{=Aib-lwJ^e9+-}{`!4~X@M^jM z+Xt;j4tI!&p@a_!8UPW#iN?o5`3#S^d8`m9z!o8Gl6r2$pRsAhh`{-P{oPmC-=z8A zggr^PF7o$?OLVKPj?roe+f&Lj1P-yBArlZ>YYch4c_m6sO6)uc-2%CnvrPc4omPFP!4ei7LH&Jci7x`f!c*4> z7HpUS5I*X_AMo3Savtl!yLxYBd_RCy+GL5!a(jyVAD;N`=JUhq^5~Ib&fg3I^*IAm z5sBrTR&HE~w=O=N;U`87$<(HNCzFbI0J8$`6XT}~1AMvfPbmuo^WSa-t5{@bmGD#N zWTq(aJPIzZYjC>Uk>LdN_upvIdF$S4s8EQUYbWF(N>jO$Xyl^~UKpIt!fX$LC+35m z&3&LC{4y7MLYSO+W@~ry7!mMzfx@2Db}n|YcGLg9VMx^s@j)&=pi+dtZ8EA{$Gx^b zp>jY=S26j>`f9w@`oaD{diYYd_0eRu^~w0<(@joAf0&nD1L{DMe&xUwkw8SHBsHRp z|GXvVi~NSj6;b$|K_@?2Ie0;4GnzR;u989R@RcO=9;Xu93|9sGIOXWTXz4~~RD8ft zRzt$yX?I*Y5>IDW+M-eAxfux^bt&7cD|r$HY@V@0_vC&vIVuY!X$q4&bL5{O2+LZH zb-FaW+`3poh}qS*iZ5+OjA$U5&v&O6vF_k+r+-foIMgR@oL|h7c!z)bs6Y=&=-*HK zJx6Kap2%iAWB`BDXzOq`*!5rh?l^m8e?8~{8p#iU@ABOe)eVSM@(0k-^U3hm9E>s4 zAIzMYT!OIFvQa0_t#@q)$K;d?h|&fL~p;I8;)^Sb8`~Igxq@}w>$`R5b(kYy}dByWRGs(9@YN}k6E8pL@(&;9`QG{+9SG>iTAv0o~h@WBc%M^^Z`6=NPSh zBc10vOGN!-UV^$&;Xuo$z-GC*R@aurmYnDXn?!;E7Gk zTiTe}Qc2F`z+2JouS)SPsc-~rMar)t;V{SFMz6nRKjmUVx4yu6d?Ux**#g|r=z21u zUc8^~mt)D`zSkhk+ORu=YW?VL0Ul8W{H=lJBimc3ok8X3X|E`~ux?xG!tX;nU~W%A zfcj{8%k}tiySN2$@wV>C-o!@^YXPMQOx(-=`X@53W7{l8SEe>`ASO~C4Z(BXY8 zyFi1?{sg?a`O`=J9M>VL6rUL%hqI*t>)jJ+?DY1hOI%6^iLV=z7!Qh@7{3x$>O|ur zDZkI{o_?6(24R5{-v&oVIN@aBRbnq<1^)exJZjp|V*d=1$BOMHu)(Do14P#5?*{*} zB22?Q+8D^Bb7%E^?3{V|QttSI3+_Kc@=t2+#VvW9^KF#+hO@`Wdt;+gb5X^dbVBg__sKFGShd48`kuRxmHchES z_QoPe9DI9%bzze!PcI9a72GSb*aoo*bQYnv$Re4PFX*k&9s(Ml;=JKZW%EFec z6o0(rH$J^=4)${jO{g=f@RE_vfY@~4IB^iGx3H6;iridH9BA)k;D)#sOR@i*$VD+m z88R>WT+_Ber4#T%6hLg+-*y&kqxo}1eZf3;qvs53d+iI@&FSZZ54iwZSptf&mQ|}* z+j%WI5th>a2w`qu{|g#%tn7Yb6_?(+_%v=}{P43@pUs%|z|DD!X)e>?d7ES>L(*M| zI)oK!CtKT1^_gMX;jUM7newI%GQ50|MjPYkcr7Z*(keU?u*$jykBgX7Wy^TMCM}ys zO;)?(;#{xUXsOBMM9;&5tYw zOrw+>mw?wzOlk4qU;0|m8G0ykzd~F*;b)+1k2_JtI!*@OL-l>Ig-o2L5?sPJ8I_0y zBFyQdjqUK1+gD^|<2MDd1W4}9HcKm9*XKe7gs~?!%T#uA6wxV?3YZcmV zI@g%b)tA{S@E&`=(N)>07vI|1&3&(T(?O{AjH>Nu0wLrNbn`BIM3ELn&+ozNR%2d| zzUM$EutcjC^TWA=sG=j|t%)RWj9ZsS%hJ*A2f-FtpL-NCt8<0-l<}S$j<(Qs@FkjT zyQMn7W4vSUm?bv@Tc6~n^{>Bhc1=yASdPfj&;|0$*I(rNdu6vJ{6M^bH6ODdvikJu z4)xYMr^f%H)_#ITX>Xzqhj=KTPzG_Fn2-Y~IO8w0EW+d_nkOlBIgDZ?%#JTs2pstr z_>sy-tC#EDZnFc`^0G?**e2CH3w3(@50h8xz z8KF!uH<(GXh!sq?W<6)w!&S!es+8Au#mbjbzd+Bpg$t8L!&u52dsf^pj=%YtKs-Dv z=&=9>u^c(H=FsAV90W;yTh=Zj!$i!J$C;KIlVoA98Qcq_8_xHER43;5q_2FlO!v2~ 
zNa#Io;eEDHSsf|jZFgTv4d1L9Fdagx>k!T!auFy7svz7QkF7u=HilBXp0tW^29xlM z{+uW38dZ+rBECfe_{t{&&+@hRllAAqO?vj?>63cFXV~)5CH;NOXu>mf;YGggP$LsX z%fINq#OOd!V5>*f(vOA`P>MXwBYA)8xzp+j)t$2egEq{L8@Pb)dJw(Q9TLTwfzH28 znQ&cX!3=DlT1?S_8M%?^Ce zEb84RZwH%D`SfAA#)`@-A59Yta;zgOV^i`&X(;f;Ifj z?Q+~Sm4laEyT|RKZ@EYDhD|&~UQUisSqUt02bZyYY7Q!2@<{AT^c$83VL60!wQFDr zDb1O*djx04ybtDOXFPa7K_G)e&hvz}bb(5HLxFb3+F#C2{zgSG;4a#v3H&}b9sD+S z;XIgLVL4dJ?#}|?yL$1(*6S&?>l|EQ@9#^@!Ep@#u$7ng)Yb4IOS8+#qMspJ(gzru zUFi~=`xL;7p1}MBDbEeCIHb8bgMWeX5qObQkCre&-iQ52Je=5GlKw&g^*30w0WfEI zBikAkBE#4z9&rjNR)>;1GGiDzg|+m=Yd)-i&lM}{%P{}jC&8?hy7%4esUOU;kywM^ zxLUCrICk2~Sfe3rNg8PU;Gv=(q2*va`-dRh5FSZG(QoQ{ zw3T#6Tn~RdDwdTIERfNpCdO3}plhdHBD+vy#~IzmVxPuRIy2qbOM8GU@Z}Azf0eVt z8_CRVs$@*Xf#uiUT7V2IaRaI=D`PUG(oHXn#XZwuai}jFI)v+5`)5S$X7xftJYGKr zh39`8+Ikf093-JeUw4>59REVd*8qO7JMh~C?@F&X&(=M@%GT*m1-siw5i^6f@pv`W zg+a+<$6KGXobcWb?s32DA84S$wn`Rna^K7FMz_j>2Hek)VncE)0|Nu_rN-jCfGDz& za?5La8L)=VjMpN)d;19B)9((z`=hmnC5XZMkAjIH9(lT9YZ=wFcUuvc=a8W#y+$7rdazMQW}T0{60)-Vx1RJ*UyVr{cmKNN9LeG}N7?3Is?bH= z!wC_Hd3TX|qoP3DW?9-}yc*xAp30LQbAfESJea@~wCH}Yt8nOVpcM}nDIK+ppS1c% z^Cx6H6R+(WC3KFRZfjIXf zO~({-Ns)YkBzkruS-@n&(#o1gu9!$Zz{Xp0nTBgUzvFM*rRe`YP-c zh^9G#?Jy#!r{dAjwl|`l2FY%ksNEl#fT(zF!Bf6`oh;ck`BB<)`TEJp*W=NF*dQ2v z{!66TX0#ws5>D8{_4nXjJIWDIa2dn@=%k_I5i5p%^{~xE$A**!m%}cxw@?aUQQBC0 zE|{Hr<4k&a+D%hD%woFrjhV*ugkxcAp8xuf)ANTt!ic^D+$(WoUrNQ%s0}m12+;*D z0l5zO$?&r1xQi`+_Z@3krg-@ZC4Dd|J|`g&OhhXJF~wK{i}mKPYeXVAL@8~u#*vpi zW%Wf_oCIZ2!bn8_hWm+M#1-qN$R%A$+dlVq+B)jDWt z!Z<9GI&!GYO&u3mfM~*r&&_FrN=9)bh)WArq#07i{37*Ij-1#pL3?GJ^1Qd z9CPREdDS|$r zTjaq#*X~^8rR(xXw%an~?o2giIIgydQ5AM=k{Mx8Y$fb{dXXVt*_t zWQ>ORqP)puiXTDE{`3mrvv&rx5K>QMI){t&`_NS-d>oRPj0 zsS5*K>%;_{Wj~USjE%##oLy;*(9XXr+yHI;rpvo4qysKPMG#+?^FbG=0N@`4@kr2G zxD@zLxP#&+|C&>V#2%-geLU>3xiYdI1K|wr0pQnVG7Z}02lz9|*Exj@O85;ppZi8& z8lXu>ow#4NEm$L1Z-;Q}GOJhR5(2ro)XsZ{RH}|y(}PQ~OmaCg+ptk!Wxb}FA`O+F zMYnOmAouH3?SkCm$axYey9XENd?G$*cfO%-fJmf$w8 zv`TOM?3={S28sD!p9KfBE3&gLBQ5qVX-8h|Fm8C9MSaCRA<)4s;n6KU=PIez9p(R> z*{I=7Px5lFLH&qbTvO^Z%#7+e&+>D-tAvM7zKm0m$i!iM2wl=(XrtX^f zLnpt+&IjGwg7&Js!Fzco7i(c~yJaJ|+hM3llas2`LJh#o+&s^*-WlMq)J`wyY%zYy zM5Ue+@Gdq}ibqfJt#=i5D&m#&jH;v@F+R6kv~n*tvGDQ1EeC+-$H-5^79Kt>?%z$G zf85N?Rivv-2|YW@cZ2F6&!)!JbmomNVQL}{IeRq(rhqnLzzG4zg<%5a2K;L5L zy}H}ywIiX^Z-2`Fcfdljh+rXLS=DvFKmBk#kEU1M$bqAqqW(bs()Q+YxIimw&>iHt zwC5)dO?DFq$K||mD@OW{??X~K#>`)=uP|6f(E#X&_WhL(v656{Ld-n7aK~Jrvildh z|EQ!pI8G_ZfYJ0!lM;vyBqr>d?e3Q4yZr-Rwja*yH3#|3e2^M=+ zX0j<&L5QMt9p;tudYP z2SESu^C8I_F1uPapy-Ybv+9c#gv6|94rZsr`;xCu*|ljZ`P+^8dxHqg(0q`(V79yV zoLisJh*!^o>qYNIx9UbN6CaxbKV?m;->s$V{)y~r1YxnN(dwksYOL5w&DbF1E-H(` z+%i1GV`hOp zGP-H%@2RQ4@p)wVs(^6?R6k&w+V%dndKJb;+ZLMI0=O^se!U$-f2L;Lp>_9PSJ@le z4@*X3t@L6katD9b_)b`;r*h9%7k|Xw8zL(|4#c_MA4P%G`v#=~*G5K~wTIetNR6w@ z|NnykwJUzmI20f(+>Aa-6Zg@=%!O>GmU_snn(_%-?=>l|B4rpwKyZkX!&^oyQUD?Q zp7Il;=MN4DudwH_y0FyPg#yfJ+;HNcPo#UU z(S3Eg@fbLAvjG}`4k42;#=yQp0B{sR3;_0rZvw#nCu{&=vwTqO_T(d`9qX9l4Izkq zP7okPBoI*dae<;!Ee;yJitMuD>Qq!Q*LbTuZXMD=usb>E!2m-x%_Um}Ge>|egZ zz%SwWO4KF~W7U#yz@ydMZ!tQsQu%{9M(^@f4hkn!mMno%@w7w38i{q+hOiY$akd(^5pnHUV~1t;bu z0+@~V;o#Z=tV|^>ynb03ny+yV>XJ_Vz8neIv9GC;=lv5p2)z2{ zE4cOa7;}&+Qov)PSQ~kpm%J&Oob=>xg+f&9-ZpcoCzK~Np+dPMPpj33m@+>uSowWb z0G_6-JG2!lsMVImei1P47K6S)#wze^D`50pX2pc((|LEjPrh{dMxc{ z*-sV7qz=Arid{DRqnvx$kY#qR&{S0p7J6qJgei?lSRQf5a|d(h^`QNf+*Ay4_=mk> zRKDQBYT26$I9by8=5d#d)LZH-utfmNz~_t8)wrmkC^Zq-J8l}^vJiG6b5W&u%&B#Um9^bmMFRqbfDQj&Y8 zG$$5dR%%uIq5gWW0`LEO{kah)%?j$6U0sf1jj`=ZDXPD8zcIw8!0;8Spe_ZEA#Fv& zA^MxjdrM)5TVQ-DkwU!=mb(gXR?ai~+<_!*3>2)h7Lyh;q&{?~| zXh)Yuv7BG$XC^nl#Q+Y&Ur{wa?%@3iNiw;+QYmyp^6D=meHaVNj$szJ=kt>lV_)IM 
zOJ5*?s*HwAw+e#Ny>OKT!*03UUe7lg-^`KrT&t57{H`d#r5ITLpKv3KFIFp6c7M4zZKZLx~4gyG|R&Qhd?{ znBa9X3|tw9#ap5G!U|eQ!n$~hytZEwKqI5Q?LzZYc0p(b3SmG7#h({A$E4nfszrfcvSZ&_a|wd$`ADf~;e+zRpq}o2uFx z-8n?;E#*q%9*f&KX3|Gi2l-nv^k-d2+$OXXrd`Oz{k+b_=~ztIyqXXZ_rveauEtBp z!EOW;y~xOOJwD;+HyJ@cA%9ks_LQ=P`S~>0Z1gH?TwzayAb2xSw2=b8D|1EGjYsrV z{EgFDN6O|GR%jDtzs5Flw`vV_<){8)i6sokk2pLDR79%(!f#9Po5$NHO)k2R=3 z2!8Pf*GdbkfsH{!LiQ~cJrNlHCk}+tx4##)Mxce0J%E3ROeBXG{ncn!hPaWpGs5k5 z{#?SrBhbU)YLRXd658$=TM|%L9~8Jb{0)A-2^eud*xZ!xeih3Ekn;6%WXojq>K>KI z1n&Io+S2&bCDQV!Bt;}Tq*EQaM47)jteKB7U<@!1EdV}UeBT@nvOz04+i>=Cx4V8+ za6{Z+PyPs-NbxVtX~cd_y}3`HaOFR)EF-LP+RQ~DYflyT#QAl<$lhaX-A7YJgipava0#4wj23RvmT7dyoPZes4mSQ$jX9;afpd z!zqrTTJWDd`?H5ODLPo3T+Gw2-sG>TvRwK0!9TBIO*TVkt$pWAjhoMp683fz2`TYkuTMQ0oJNBrW_7cLJ|M*F z>YUBB{f~|4khsJIemUGW(|pv{w;QJtQxZ6R^JJ4PIDTtJq$MAqxG4*q{k} zoV6svvG{0_O`0HGrSv<=XQ@$nf8SO?YpVAI23qw+hH?f?I|r+C`!1GV7AwqmSJ2EV z5wDA{I!^rRF?`DXeDLS+;DrU@B-Kr$_Aff@{&Y{9Htiq;J-bs zqaL)b8sBaAFcF?_Oo4*Hcps<)LTrWakj0Hyo@4!SxWU5?zSAanmspNE?_mteOeZXM zjrCBHlUC8BKdVVl*3Ql7%(hGUq-S6KGpAAX`9;f(hilB9EfB{wWtD_7x3nVkVFinX z6w+i}BXFmmjaJfh6=`fAaFIE9x!g@D8^mv>H5`#|Mm>=X78t$S0_X;#zES}N`5Qz(~XLAza4lMj~lHk_Y5 zx3>~AUZs=$CNdN*%g!2eJ9?P8dnC5iYnGtC94i&A2V=-UYVQ0JTXeBDB}YY***u8a z{A_Wu{aABN!ViWP27C#viW{H|kez~wzb2aayb&*!G- zia_DLR!6h`GzFm?-_1LzmGH5ZGW%@W7WI8Pj2#wD#~Q@W+7R?c?z*RxUdBd)`Lbvg zG~R(khIi^g!qiT+y~A94oy$;9ZYw3KAdS#}liZoTQ$na<-M9=E~+T{jku)ad+vW_-IJsR0YG){B(Xlv?QTrLfL7R zpZ9b2-^%{m`q6g2{YzlK#TA=O0$%g%>4Cu<$i^Cbc357P|h_0Dnl0+K4Xf4;!K1!{_yS}}x0#mBnltEoi=a^(#z zQ33+TA@ou<-Va%x$@!8q(hkfM6l!gPZS36jKCts*tM{&7;I_;BHpBt2s2a%rbe~D` zC&!|r(Av;=S~yGlo1BHS5PLH*m#TeH@m|5*XEH?MpcPOic95p`Co$*5aFRaFNsb*O z5g{aY%wQcDNVSMEGds=q!3BysId!1s6vn@{FM@g2u4+@_?|Vy!jdA(@U6p*jt5w5} z&A~$II%n-z$6p|R3<&(8bZ%-{P%3#XLD0_3A06P!)}x~iiEvT_s!RTiWaQVSS}6rc zr!RY~X+`7H+;saoZLOO$dV)$#nmL+w=O{9tIYU}zCXq)IJF8nfJHYEAj}Z@KlSy+O z2O~zp*sk3HVx~Hb%WY%G|?0+liL&C+Do} zc2zkCXMK#A#jm^|A;wFS#;->qUtm!{)#dCboW}+o6h3BnD-5vNE&CCkZj)?Zknd`C z)Pknr9|;7pp;g>|7xrb_WZJOeq>^YRl&*o2TlwU!8zZyYL4>Up{fj$E0#sbxhn8H+ zc7kWiw<@t^9=QUGzR9z34eiqqB{Tef+U$k zpr^RnZ&La8`Hjsww0BCKbNN1+`q9_{ahKalzVgC`{u8a``y44wY6XiMDa9LQ*P^sU z{XGqZ2yA~xn+HRy6^9#$m>VB%#tP_G0%q>%A!SRX_iLFqP$sLN9yg;M_B-1=0KYSI zgU3~AngqyKd|V`cZ+^!xR>JnrXL>ILKj0E2w$ftH$^^Rd&~iKS@R;Rc3Q8Cmf zi%yIc(c_FaDWHHOa&FXIUmgiCeP9WxU%OBBEDg#yQXUvoqyMOe3w$SKQrK z?~k?;D=sdg@xl|vIfkaHMw8pRvR9(Uci+2O(aq;=Cg^knWt|?=1fBHY^Ezo9#$JxS zs3%Lu>)N2zKfI*jOjJk1yz_fB`hy=ou3ODaCj6njoZiq*U$|3kVmldQX5Yjry%)g; zF}>L7uGItU?}Il)`M71_;(mf_3gIe;lujzKdBaW>O%@4j)#&zWl*BFn%{N8Q_|H6N zDo?4rpqb!C0^0VRRFGL|tMN`B>!vkT#4pMkl?nDI@*mV}&^&LOKcT|#X+wC8%cv?BfKv}Hm+D_l2oNq=*cMnSVyRPH$$ zW)vsQV?U5m%6rrZuncq|TJ<0Cn~C{#|MItyU!B5{duPi&Wn)Veq$+%@Lia&E-@6pV#)U z^bh@tZ;Y5_Zw}fX|9z8O_J7nNd5Hr|+>W#L=rkz$_Dqo-GbugBSm{RH*xn7l@?kqc zEci!(RhxWRdm%LiPQo9Jb1X^hzgT*~&)zmL+iv;@vzPuf`O3l0AhltYt6GkgheH$M z5{Q+N^e~A6OPif_iRJNwQXzHc&me(VYz+b`oV8b_se86qAJl!k1ErNI8%Q4Ws!M+e zX12jKasDv4#_D$c%N6VAkE_`Wi6bbPt(E!{150Rd#1d~hmI?SX*4w+`kdGrT+XPcY ztXbW2tXV}wtSL!L-XKzH1wgY+S0+t)3X`Q~Gy?;&`TM~gfKN-ooAV|1H6EK(va>Vl z6|iRDSY45W(;OpacZ`kOVaj_yn}V@9O->l31h2>$87;3#qn~4HZ9y)H0eo}*nn*ABrT`U{Qo|rX{ZA9X*Yvo%%sN|v~8ld^mgu3auH%|E| z&nSDsq&q5+>}BvaC{h*e)SFA_e4XXj2hUUbM2yY;T1FA+yrdGWBd-<$k)%)4HkB5k=GPuW1$|hXWq)(DP+Ppt1GZl02`}+5u9>anlBsoy zpV*P?%82xUyY6l~EQ~zrj3kd|#f?NBP;vp=T)9B~-xC4esGd#nxBCuzVo$EdJv3jV zk^sWbTmm&d_rwd!)DrH>U)AWB7OGIN&&$m6o#@p6Xb$6&azuhm{19?=?C<{us>q#* zgs>ZMbr``W*rD=#(l$Z93)>$rzxOJOA2XkUbbC9?(JnNzlPan8qI|?$mLytLWE%T^ z>c2x<(sNheaewPa1`4T{RLX)3$n(Si>ks` zDcYQx&lvr}_g@C`b6mb$e=R}FI>BG42)+ 
zN`DCex#CByGBLq>0S^zDj+U|v<+YDx>1z!CL3K!(ewqA8Hc?xPko%CV)KWt^30`{Y$K$k|-g zfobk7bE$P>rdCoBvU>TV-O;A5a|Il|i!_Tz#FxD;44FtZr5ETGwj`+jEn9#wKy|yI z?8Qi%mwdD1`@5_~ZDZ4LN*kdyrol3p+B%fGc=nMcBC7Wy|v5M;S#GhL@yQ zhfXilu9Z;xt&A6{I#(}87snzihIn7yjx=%ulGd4&9GQIj?!t zJCc=JWh+@b4PJdSZJE%IX|hzbp&ZxL_rr?H$@Gwi>)h*9*TdQNv{adwT+c6KWn`*; z3!&X*bh67zg^oC7<4PUrLjoXpwl18iKG`iCuoT$RfHc! zHW~q7g-vRcEB{MS+k+a^Quspc?;^&=3TAB0b?~bndbW-q=#@zq-?p8F1(m@!APm!G z9;)j$ev=AYU~}Hj{2A1#GXn10$6k2Z7kU)hnS8g&G>yrn-AXacV?IXyYq$s(32k+F#k}S-oYv z@?Qj@|3pE5W4su}P3`V4B=0`Rd9l(-Xmu0$n4n?%XUU4=;*BSBtL05(C9% z4}}t!63M3J&sQNq!J==b>6^8u$6x@3aM{5 zPT|uN9{D_b%D}$G5IjEGp;IezK!!!wek_wvn=ldF`&WwOt1u@MMQy@(@EF#oAp*4K zTP#xT*UeKQ*-Ee6aOAN4h}qFee=*6uw#KVpK3G2~HJJe#N=i~sC*?wp+ATyi^4Pnu z1SpMyBcAkO2+%d_^Z6gm4jfUJtP z`8BGc+R|xeM0kJ@CEa9iE;utz+gD%LRhRv71Rh|1vW=o8Bt_AV5>&d6^5E-WZ-RONnNfn*(EXMLBwEroN+!hiVftT^5C<3^;*}$)yW=E0&>o6eWcb1q>T&JMs#+gqu&GRS) zoQ$;}qUV@*XO0&8B{i!WOp~Jwi@8+>=L^>Td%IItqtBY zqnqE}&hv_I`d(53b)MhOYXn)=>>JXB`A&7UE6nHno$rmz{ZLm)Wd$_2#G@9<9Z$AK zg|}tUS|AUHc?Q{xI^=rUEN4a?;%LGV1nSH%S*(;{oGjJb-15jRQ!E2OiY0!e z{5)Y({;h+|)nO%ZC$N8^GY@`j#XoHXtE{x=iL(gy*CZ@ZYj3?>#tE-BcqT{;y05g> zH}%#ZD>6_G`c|JL`BCc)fW?r$$V{nV?5S^^3GU?@i!$9IMu`;Dz7SO@d6 zc0P%tXb)sb(e_^1rC$j%+k56UUB3|&6&|=GncjzofP2(mlKpkcpFmArZmBEJo6>?< zl|R2vWmj_*y!EPf>8vddqVCQO16u!+6ocv%A0BQ{*t0x)i=HU6W|lxFZ01p;ojkwWNp1TSML7t#+xNt7joZRtb+zAGH{~LLiI8ClA zzWbMnB)=)ADII%4Pm&+#aVn-}6$&seX0Sb}TvU0Iu^IH z3W4$yW%?DD;{cXKiSR-Q^d!?`=gC6QVkC<;*DYKoOA_OKei8P?*sk;vkRveAl}7j zzDJ)P{rWA1o?uLr@R0G7rVav2s>v}e%ErHjiAQiirUxkBJ*MgRC}-(qkUnG2_lDdd zuw&GhXzk+%hazPw$Ao070t5oz^+D_=d(1j1HoIC#JTUcSILVN!|Bn{lx{*R19owL2 zC|dUFd6HH^8yUmuQv-L-Oh1y|xHuCKd_Jixon6Co73u|c(Vky0J6;%hk6{Q;-&GYG z8JC-#%N{0pRHsz39nIW>a}`Q(O;ZP4$Fk46PyFn<-K+OwNga1U=(Ld$RE=2Ot^;hM zFBQ>x&Grb%8u&>^;UM8VQ-DL3H{z}W{5SOSV(+nKNg0UW`IyBJVg{6kKb^JB{lwwQ zX$hr%XRxAT+%)AC>gT8)aFtVe2KzvBzGZ0qfXYL!-rCP=AY4`^OmZaILyP;>q2qNd zKEIP#&OSlYvFW1UyqP3YmCovsHmyhuruDo>uy_{4Ot~&02=Q7x!xTX)e@g#hQ?H*e z`nL|Yq+cAOKXhR@S8a19fU7g(sC=`6-oGEjwsuEW+E2Ao(A{5w+D;QNmU^){V!QX> z$KJp0{femstk$PG7H-I5Ylgu4kmQx0Vr{n@R2vfUe$%$8UF3lWo%D*XhW=~06*|FR zJTKd|l`6GfDdkPx1h?F$w#bH@pFG*YyPxyB_gehvJa6u2yQqnYZc^cdsDysqjUlPS z(ei?>4Ce=Cu{LKiAnm|v&BP5$UrC*@aflwXe+cdYYADlERV5|_l$^eqZ@O;n_aC~< zeMJ*TmV{;Nx-e9%;IY%P@0YUZGiW|^EZcURmGqowE4D$!j&mTP1G3FY_B@yx$ZnPR zZtF3@bREGln~exO)r-Q-)A}d5>D`iw0uZZlJD}jHGF=A*{aVs+3g)oSnU%Pup-4wX z_Fn%cG*)$7dV!oc2BK=id5iv_KFoPGSh?yYT~FNj!2tXDPr+trV$sY`VMJpWNjSIL zxE_%ErvhaFhZkiyk-Dt5JoJ;wU{k`TXC0Tn$b8oIdKJZ2aw5y=oSLb^9N9DbqbvN( zWBE0OQ5wy-mwam>L-uTp@jnj3;BvB*tJKsZa48yX_vAxV>z0XSnw~o8-n1;^u@NB< z2$ZucDgu7+o=HMuv*D|6Ju^n;im${jpMmd}47qqym;iMfKcsF;t5;)nIxyrNJPe>HK}(42rt-YqkyN>FA_a(wGV?hxz(_kgM--k?3AAtlM5_S8iXf2F zTmyjC_gPoSG80K5BAXCOOd?+cP^Idnaqt%KGvbO)YoM;CtMT~_5oZq zy4mY3s)BVc!j)I0Wqw_c!FQ)`s5;aIdGc6YwunGSTKra8uUoSODvVXXa=xVePV@@z2}X8U>k~m} zm;2ZXmad7R%cb@r4(n3yt_(s33R7F378Mn*CQHs!n){k3$G<2=6hA^zMt<&F$5kc@ zw2St*&_X;1^5v1XGAU}s@^o{}Ec+eOT$cJPDbOn>5~-v;PvclwViL$D7wo6#>jn3D z(LiG+hzhmrxB^+Nr^Ni*k|w?=Y94&kIfp)t+tj?t@L&0o?Yg^o`_ccx;&=`z3S^MSYYafgmDR99*-#6P{}N z|6rB+)6YHjs3Y1$&xwTab`{B6x?W|{=b9e=3?c8Y&nY}awbY(KF2GA+boBo6;As>| z=^)!+LuW#T*|V^A6HEUfh907N1o;f_>a!4v8=|ieN5mi!BeAutOwYur1NX15nf?}!y2?=tyEN^!yIfLo{Ni7^~DI5 zKcz!aJ56)n=X&cGiTvHp^;v3|xQ4Xw9;>B+#T~VsqXzgca9tzozMx3;Im)M;cgNU< z-t)1($gQP>JvB)6bJvy@KCIRO{Y7c+icZehDv){De>45FK9%CC81VR5rh#Zxup3wb z^(^PY&ue+aiO3M#iztwL$_oA)dw;qRGv(8ZYW=}PJ>!3fwQ?pZa z_Sx6Jq>iOG5C@njn6`+IGgZjEbVkHSJV!41D4@Yy%;6N-AFIH6ahmy#Z&!tm1U2 zQkVEk6H$G~?og9zfh!I&wX%WYTn z>Yon=jc@9`?^2F)p8q(Vh*A32Ki}V-()+xHEvCfzGQU{ha?1Yh<;^ya`h)B~kv8Xd 
zD9ilwE=R>XMYfS6FaL9wLQAF7pZ{7d|N06kPl}7`W<`oN_t|FH=5VUj`H%e8uzmjx z+12cQU*jyd?XAP0w$*Kw49-!sjj+lUE}?t3NcgT#XZnRIi~}|%JQngRjQYJMB$WFF zH>XyjlP#TK9`kf0L`|7fYW_tz=h`ikbku#Mv8(5wLse7^1;(5&RPyTVaNr5y3kInyH5xBJj?;Q@ z(FvKb3%5YgUjUk(9LVhNscAZA0J@a~2mxH{Rn&o6yMVblC4@+0jVmkq6~&hxP2)c^ zHhh3kH>q>%vrl}L$2p?RK~V{j*ai$bg2Fv{Ht(?E+blMshB=!>HXcA+VGo(^cyDw# zEPB%_Bepp=12Rv@o2)cOqs#Q~EWkEAU|hB?L%%C0OvM53)fhltjHTol;UET((eqBZ z?;q^%{(a*lC?ce4?B#U=_EJ~3H&)3Pn~`nKt0>{R@{`FQSzr|?V;Py5WEJ`tx9Q-b zV(e#r0f>8!aTN}_p?`orm+kA&~+oBV1I3EGr5EA9PmHT8nXx!;CA zCEw73e4PP|B_tSuhQg?t1fZx$2R%N9ODWO`2=wvGny4GQfKN}6{R_s9c7Vb=T z(eZ-|A@t1j5F<%)#FazBr*08S%MYbWcVet~D!u$pmRpe9M!dYY=x*@xD}rsBrnvCQ3x&=hG~gEdw2W4Rim z+zah{REy_toy_G5;p1#w^R>!@uVjl%=YmNlrD0_e*>2K$sg>wZT~oyRQFayN?Y9aW z-lys;BsPFf`P0T$x3>~^6Fm0ZBZVMP8+Cbl;WSFDT13yv>$jIOCUhczIa>a?SPn?J zmQYm4ROZ0>o0)hv8q?0fwu3wqbIWLRmllbcS+nF^-KWYAI~Jd$zq_==WAxVzg;g0u z=p%<#M2?uE!51a|(s|SjwcYD(r9G0Kx#T8QJKphMU!?Hk@CBA%^c4lKC_YJj8VUIm zNzlp?6#wZ(^F=H|dFEBsP)h!u6B!jgzrW43Z%aEI^T6jHy0K_ zKF3O~c=PGc+}z}33W6&PGhdaTFZa<<(1QEwXR)E^2oFBhq4ngItTDaVK{dyt*OyZ~ z+I0;A#Gf;!!19^V1PM45j^}U-erpPtn$zhA@#V zggBn-YzFeC>~E=7`0E9_5{RG8+fhA>b|rgQz)7F?@7HG%hj5h1tc@^jF2p?P(_my- zq2Rh3Iqb$WkNtGB#IO4)OWW3WWLea+(C9GKgEq@dB2wZ9w#JU8eVN@vjgWK5Fk*#& z(wTK&)ABe(x6#FEypy0-T`-?C+%CC~viRYe6muQgU=i^IC;(uFv$GNez7NwihIKaC ze9ai#|MN?b;0Z&@yx5-89DN$!ALDwci7rF;55)5;Wp;dl{qJnBZo0isG};+K-jvgy z=lL+*m;rV7zuL`qAlJp!5&1hMi|ef*$n|~`7pvdJH+tDh$rL&|BhTG9^SUmd4S7ku zUk8QnsaN8yJ>T?ukn&|t3Wv5IXTpu8c_$a0iV@W-fM>{s01&GFSwX&jrn>Tq7b2s( z0!1bqp_}1X`7HO4EwJ12avJ3qKi{~oVN{11FE^?t_s~3XAf9N@HcN|Eb+w4BVfC9~ zWHU@qT*7$3FMyJ6wWChlZiUY3nCtLjaQ<+q#VfG$K8QI^Byfn(nx(n;yr}53#tRu& zafcc~_0JUj_o*Tm*S?7kxfsX{6med3HLq}Sh=t-d3u0ySbY33%dNfX%a zd>=f>nI|RHI6vP*Vm);dX!l-%^2k7g z1*}?$t}zfEtK34I81DZF``W(G$0q@8$7laVLroGjNK>2;uW7qcPi6%<0Re%%bvLnB z(6m=YA)LGt-QCvHQP8q}mSY@R+H3UFDSZ{6KZCWOKMvP-Tzhty6OFz(o~)*gNZ7k7 z`l$N7>kd`qnGr=u$K`plewo(o?$WKV1~_i3S(K~Pu%r8bRK0gRoB#hc-j)_`S_x{@ z-qZ}Oy=T>GX)7s;#H>wg)T*5bF>934qG%~%D>V~)RH;o!>>V@q&*yjF-}n9b{&oF( zJ+8;=@w(1A&+|N|5iK()CR6?#$0PGud2(4%z3V&eq^-+{Lw2et*;U7OPL@TSRq*0h zbYm5>@3iMB`Y31C>|n*x_m{-%1y)thH=c8xgY!I5Re9;uLJ|D$5qD<@Z4R7sC-loU zZoEF#hJ5G5_Tp&Nv<2#&ei#2ZVLfMD)4T(oQ@Hg4uy`wsB;dP&!j?9_!;d zn_f@Oc}aExWGdl`J=j7#J$=qq(@@bGH;)$sSMxI^o>v6cy-JcN=JA=~g=om{)3x6y zJ`05AV4d;Ky!>4Bm9(@X7Qc& zGM(OyWW}RTw_;|Dl^I2PMTS;3V*Y;kkp3w<)-dx*x+NH+zOWZ|T!Cv&hfiJ_RW$5Z z2skh>_K1yXXcV5d*%ie7k2f(An}^CBw!aXufEQwbXD{nw&PjbJXFeBtMI?}2v+I(P zjDC-WI_zGUG&9oIcVG$pVmcAuQO(9W$+y7r7CBGpj1iXlg5X8?VKh$dc?8({$NJ{5 zj2ueoFau_%V)Wrab^K!bv+4ZXP!56zp;UC&^G#lG0f9P`wYNt=@GVjr^U!R|k%L}7 z4R#kYu^&nKI8%3dFN%RFAPfn9NmEb;W$`4-)|d{(aRslr_WD&;8!IPn8Xk&*HYZXu5QgQu3HMJxA=E8FQROz9f^nHBtPcV1glr7teNX zle`oMTdQUlfbxQ3^E6!(qz7CyO4_z266Lg&p)exNl#p)HyN&`LNb1D-ht9?0dB{1P z1F*#vGR5huQX$9@^4fP}_LoS3t}k}v;uGiu6HeUWjmI=8H^4k?RP0EvEG%N@=M%^M zrn^1=K$bguOV3LE`bC;$M-Sc#W?_cccrF-vGn(tTeDvgQvf@jU5ZPk+ey&1$l9Mjs zpL=Da;*ddcU&oCNd>o{a%8H)NwzD#w6UE?0%r5kq_s5{|GgEQhvU{d&k`|Pi*2!y_ z5cqZ<2vqCIb+%inf7<_ayIhZz6v$g`Y42CF9oD&N+>gn$y43Xa*iZ{xWk%9&PaZ9j z)}vKFRM-ULge8#3+N{gXi?Wi@tFqYtOCKC=hnAT$`z$vF5DG7s(ojVtTB{3l&+gHW zpbZFX+{-8bi<;+M3swDfN=9t3`T*Kk8oMxjQ}gi31NhG*sruv2VQsVWtatw@A%fmG zM)qv1n0~@=;eD2-8}|;t9K#T-Gr+@4CDVg)&1Vn0E$=EQvP%a-aOpQBXGVxLs>&4$l8sU7)d;dW1a&uPuLYS=kk%4`*lm;g; zT9hx9`{kZW1v^6jNr9nwkOqKviT&8x0gV*u#A|)`J^w;rNa%}@TrC=ypEeukeUq3M z9HoB(Nv#rSy_)nxAl(t}J)cKXL7E!v_c-h}Hce{4NtpL!^?d=h0Sf6%044LuZ`+T8 zuUtFQA9!gK`xv7Vs>By0Z)DyEXmAfqFm1rS_WSQxAz4r#(bf_9+CuGZ4LhQp*c6&) zQzyLQj!7MBL)q)T_ELv2n;GfPQQAVxtnLw4<{~z#kBoPhyk;txg~%Z8L%pXSmFuS) 
zUzSk&e+}4pvwEKj?~DZ_A@J9H54=9Qnva8K;paR;wnvzl4HTG$1%(k@(U(C#M8+yP zY;4LzA+)c*y=UeFMH|#JKHjHTEu&p3@;@cL!9Y!IzbOmSEFC+jv`i18eujqb{uPuC z6%FO5e;<6`{MPkDYmSedi{p>?GM~h-gW)D+p4D>K3wg5m7^LGH%_{tb2OG3L8XMVX z^+8wwJwT0D(fM?6Ruj;wnHjdtv>>#NA%;7#(Nx>38e>x~rGh247W{23{6J zFQ>4uL}0Zc7vMY!$yw%?5l^(MRjDV{=Hc zQF%bgGk~obW{#N~t(zNh^}Kwh+lFWsZsKC(DGE!jQ#bgojCTtb3dF8;)4F$G1vOr1 zSvYreC#Lj^S_irZ31ybE-PRMbBSe8|N6!{q$$)fpdtb0CIf=h&Hxr7!xg-I$9jRNB z(R98&-@ncrN+FOKag=)+)jD>{yU*wemLmFx-&Fd2s~YiKnWlQMZon~VQi22YVyoCJ z>xq<2ak%?74tf)v=gD~4bHXRhkSPEimI1d3+nvv*5R?gqU;&O)v7TG^(_hEO`byrV zuXI1`Y_idl)Jhk)dLR1wJLqQ_64`gn%Q;y)OnhqX59NG>Ri4Wxey!8ikqk(!GB@CF zuk^Ewb!_}=DpNUswTd&(D_-w@`P#rJ7CW~KuIfW!w}L3}turu?G>EZxm?F=YsMZsF zv@>vyc7OArpyLorx(EWv$pI{XodaDz4beHHYaTFB&~n6<4xq+;+MLh_vCAEcX-!-M za?8`CE&ajxq2tj9X;tz4{H@TC>}CFfJ;m`17>NaaD%bSY^U*Z{9fM5gg55fnyxv;Q z6Riqif)(l7bX2_&+^(-r+nZ%6+i>D{1CPLxH;tIj!{}-x zE_916zx|I?)Pjy{855&9{d0BOJ2X9JKGc=C+^q9k-$S`Zyx#tg_WCb%2#n_5sYWF! zy+n~67e?X5+|NJx;V3SpjO2+Vzs{USMd0hmN0%D|23f!S%mw@{Bji?#5KWB%$>o&j z_&}{2Jip;S3B`71(q44x`s5KsnoIOCT!6&9NKs9%@6nEMYeMYXeiwDr5x5&k@d&cO z22JSYE0Ue&OX zTJz+Fpsh;ajO;Cm*>{hMqNlap=n25*Xger3ESx0ZA<9I|B&sR}u%emIVs>b6d1!)p zGkU}B1wS8VJpM5`>`D*+82DOG_+V*>G%D1hRaJCP7En2S}MR2@>fX~M-9=;sKAC$?d zz6%H-6tKF>*>ZKx8CU9id#(lq&%v|LuS$G4Rf7)W*~5SRu>4p1Y?0QTb7YMA#In`Xuh^?(*%I>KxjX7ebJ|4`{rEjDfoF>h!Nvz|F|Twg(L9oB#?UdBY$Po#g8 zW6nE0)WOaDlnb639WQs%fAa%J7v7$k=j`a6RTboI;jvqgzB$V>mF>83tUIPU>HN(R)(ZcaG|o(}*wm0WIqyVs+$QzZzlqXggj>ejb`drV zDSshjNyHwHc65V7#!zhS^t!-MCo|sw)Q4F4FB#0oeYfie7%q*LmPzB2T}`oQPx{Lk zRviWJ0!ddUeT1F@tGpY~_@{&n-zZ)2&$8RlzHNC>{5NnDTllJc=Fd4Y!62bdWChH# z68yA}iplSaLxp3KPBhf=UGc&P!lwQ%@b}%?`9dl{TwNb40T%wrU^-Z)nR3x+H5(QB z0a16}jqrPo(_+g?k4?{kci^?&NElvyK;?S`q!?QAA!8*yWPfpR z6N-w8%FPQAnp6mY2KffGeao}s?< zY8YQrloXmIR4K?j zB^0sE`5P1T{@LyEpIFR@U*&bIqdKjPH!NA8&xu!A^4b<%CMFk+&L4mW@^@73u5Kh-2s9LKg7kZ<0t7Ga<{$-RyorvSyzY0h!a^cuPG2-xwA zQM~|s$>x}|~76yN481R}e1_ynMLGLZXb zsWeY9)^H_A3#FClovzPX>3MhB_@JB^)v$ZqF^?jWu{X8KFL-_chq`PqGTbKeQco_Y zkM8`bC|17Q%%17+sGL)4$^C@UnY4;fp!9CAruNAe1cYe<(JRtzvm~w;b`~ZzGOP5v zqv`h*+54qDSLzU64>))jf-jhHJl~~sFtU_31ggDc3NIZ&0vXvNaEi-`eNe5WP(sTy zTeGIU)yTOaqMAad;&zIDNa8*mf)vRptW1ibDoNHf%Kcoz)-_R}&HPS{VC88K%E~R*SR?)DNO$uw?mE4*2htGv*=3Vo|ScVZ`hHk~bT4O?r)E40*FPf)YC2 zCZB&SB_0-a;tL|Hyco8^DaP;=R&R`4m?r^aR+sg82hHytzFJD_R}$Nf&gOu2=RMl0 zOlAH9@hT#VV;cX$5swmXj|bH4M)C(E<4pt-Nww@kD@*xLDn6elOCpT3>BVk<-7D7^ zD^k`~^efBda~-~>kb05kdKAyGko;T#gUM1sp?CeCXY;VVZ9o+(ho#Sd!|E|0=kBzL) zkdsqk$)mV~neKZ-6CaCd`Ta;?vdsP5WB<;b!e-;EH8TYY8I0H1lYN>yScqa&u7haush^_3futZHQ;F`}!N^VdPIxIbPE*|~} zqNVCoGPO(E8N*WWbBO6O$AprOF$QNxNy^|xzAc$o8%9wgpxlb;LYI@ry6;iTas`yT z*8c4S@W^Vo_d9`HcRL<8e-|l^Sw)~}GClf1^7dO%BaG>(Al_Scnog6o7PD|M1Jw6M zOR3TabP6ODIs3epQfO$%3$d0wIibk=;lOq476t5d$GYV0ZA?vTX1vY6wj*lwP3 zZP2mC=y^)upVvf(a}Bf&&fTg~7cH4iQU2k-QM`@tWYWJ!PY}qKk)6l`H`v|~#bqbe zH^6_z76&^O`+Qv9Nc}Ybd1P}6x`m_)8Xix!D(_EPo%Gcf)k`UuX%d^vSDtm?os3}G z8AwG%`mO4h#a91a^t%*vSHSztz&0=ICUgEp>}>j!firEA4)?7NXE|fO{#_io)N$r@ z^E=SZt=wFB#if*btN8HifB7t8T54~Esx~i&6Kl*bPl3;8_CGeGwsfR_{H@Ie;?&ri z&!?K%$6lQxv;Y@{xBz3i``qgZ7ARe{6z)p+g9KN`>j~~%^{hK-BGD!bypf4SMyidq zeHC@;)|*6zu!mHp_cHeRYs%Ld>kCvOzB?qGXk+qU;rSX25fT=R>{Z=vxHk;*uLXEN z*tK4#a(|j@3*do1RlWY0d#5*%S|Ku@&u>W8by!x{$x*hASsh-a^_cFb{B9eN_0el` zxv(L(Cm6@p_8a6TfT0q@6H4k$4d%9ipvCnv>R_w!PnGO$jtN3k)S`M>qn{A|?C7{gz9>zUvwL^U2aKuV(+@I0 zY?0Zh&?X)Zw{AhB?leXev>kP^{_kr1Dw_+L-9Xvi3L`=m}5Nu=TzKczMpUEax)=Yxp z0V>u0CBj(=qsmfru?tvk*9wwdNeY>%OdPp`@nSkwO51&g9_=7bk8zQ;QOHIMbw1!3 zVi$IDsS4k$2ucfHQ^mhBKM-LzAL`q;D`680-x>AY=B}Bzd1H1Q?Y!-j**mV7nnW5v z7c6~hn)u|(rhi!Bm13muy4s&F4*hgCS^C(K`G|e2>()dJmM2U-P5ahjg?6sWW^K83 
z6Y#+DK@!sqOY*Qu=cn`gZ~aSh7?Tf|iNi9mS5mv|;*X_YFnaCJG!EbKvG!DBmqz+? zck@Kbm9l3wN$peH?z8OZ5NXGewuHuZ-& zp)%*LXx*79qzFTkP3T47rkBG7F3b3FOzw#5 z1v-*de*fbINUeMn`&l<=?UC+zHAhULNouKby=9=himUXo%kG}fLzq5`Z!cD2^i~F1 zd?ICPfw?~EE^=F3C&*7~j5qjc*|-RM+&BY>$a?T}y4RfnMoe~1=Z2d`*EHG&T(ARa ztcGp@GNQ5f0&SqIK}#^Ym%#Bl6P@L7;QFK)bmnXzonwZDR;5X#X@945yxG_H%6rXD z$P0klyBI4~2V7aPaZF_4>$xEwLHmaXx(>eXZ{GMU(AXA>q3asyMD&p$MOn@tz1|>I z>AM8_WRN_FVv(yv<(mT_`)wUui`oV=2Ye9~Di@?5!UkTC|9W^K7hESH zQ?`vHbN)4in_@+}UOmR>9bC2$iL*L%A>C}s<9+{7w0ju@ASP@&qnTvsh~qon*qSrx z4cm`&o$yG^G#(<;V&pNLdHjXdRFPXNgp?gUKW`_I_NRb&8=DR z^t{YXTpU*fvHIdJAC>qVc3dDb#z)f;i8ZT!9@_>=4`t{Hro$jb!mL4GpF!se$$%|A z3$a+g%J(}nhYg91JWkKbf`3DFUkyRvTTK^N9*Ef4&{oH7me}lz*72%R+0n@cnC)l9 zf)|RgBU59HlS38@J%tX;(@p9M@;{Cp5Y_FRgx)U;a^g9RH^j`6(WB+0fy1UfIq->z zRX*m^i=1~s(|gCxdA`;`Z*G^wXNP_nFSpTjnU!NJbYn$SHzk5Mr8Z%}{rgg^;76Y`wkH4zn@t*gTmB2y7$47y9C;AD4|{(`{vL zR^PDW_?DJ#TH@N&{X}fZ3XwGdVOwuP_6MGHE9Fq$=;cQ-$g=KzDKK<2!wY@PpvDet z#C|qq`qXZ7rt#b={8y7^>H^nxd*%s|l;3iPS%~Pn$%aeFVqh`nML+JWpS16w zLf_6R+euOCZ_IPH>63D^h%%K@f8$<_=-dif8z_P~iT0az$~>hO4*(BP3`;!eQbFG8 z+NyxvYjtdWFI1S|)uXpC2Nv-8t3R#Pr{x&ws0HBN0lX7$1KCP?4M8=zKLOu-n;pn{ z+qGpq_?=T2Uc%Rbu4MH`7`&*x;5z(^fb+$utMLkccphQ0rejavTL*tmnFr{?Z;=;dXU|jTX=PQNR{o7G`4K>Hm~sB24;|N6 z0x#m_ADt9~bSAWMPmH~dt)xUSmwe)ShpBSc6T1eb=zZ<-m)AmR(a4)jFvr%&9qc%> zuYn-jhrAyiJfb|gJRiMm$)BbL(~qtk%}7!9*#J!g zh3zev-0=h~)!)sr#3ODh8~FH^#LgpWC*o1G<^3{SQpIP@uIL92vP(QPDPKQfQvnN%*ON#d;??D1n!XT}pA z`dRIRRbPbdVqbO>3$Mg7nr0DJKbL>zI*FYhVBWetQZv)!2{JPe&i$KMIVTd612vd( z6sT|**|f^UEHOC=(Lk@&bnJ47rNq&5_ZVhmyO}IiH$E z?FpxQ%2b6UY__qkh>Z3_ei0VW?!ML`(zKZl47cL=!2YOn<>DL!^5Gh_zk|_i=ztzw zput%R%p%ehggNz)4w$DC=-r8J-dbGfZ1&3P7HZ<}p`(*?eAB?F0g%jT+kdq|O? zzZGnINPTp99kV|2%7fMn=tb_Wf*X(P>m6O4k*I+!$L%%L>{jW@_Ue@jJ=XD!j&p#+ zn>TMdYFbf_&Q+bH$GRoeHJv}VbgF6|<_{Y*|@|`iYG6PoEGsLzb3viyH*r zy$i@IE|i^Ycn5R*EPLzZ9qdP*VCoBVSbM<_&$EuVXjfawr6~$ny|?7g(=jZI`7Of! zk8r&f*$EkDW3k(O4ey3(Txd&^zRc)kdPi0Ce+=0`!)qFO?R~8b_=R}bmSTNQ$dBajk*IIbih^4@+^2uWwei?_}CD98zoI!TDUgJd4RJeC}s}2Mq z5uF!?-lv&a3oCb(w%M={>-*NqD5#^{eV`c^1JDaD$(LjKrAv{Vt&QUwJYRE~ zdzi`x;3V~kd#9>(aqlmEI2SjdxM{9@4!SQIjhSV5daMxwe#d|Wl@=oEmFUkNuqYJH8QMpA0Y zo;(T+W8;5p%JC;v&Irem*>yuQNQjz|Tq-B%KD7n>(?6q<{NRdlb<8tzwMjR0M;+vm zZcN z(&8GjNbRl}uR#8%znOF^{(e%10^y|g`RhFKSOW;sP0#8^cfHnvMl zM~|7Q_P+lrdQk4o_qLm-;F57$R4UZ!5-xG@R59n3*8AZOi?kDJVmz~~kvQy~nKx7Y z;mHf3?MlVHZA9UCdhgkONuK6--YNO3CJ-Z_j|E8~_@g)1z^@#%Sw%g?6D#u_mnF7| z1|xRNR8J4HA|JBM^sw3Z`nU`~xUZ>b+2rr!>uZ&L%BYpHp8kS5Q`>PRz=0xAfzHR0 z!z%rRqhc8;yC}1oBW1nwlPQPIABFR=q6$oF-tL0&UK04PBIxDz zx(HXcpUsj3v;Tkf;6FmDqxyfCh3m{uv4Q5fHj>2UdHc|)V?HUuy=!?P7UY`a7%&Jy9GP9T=}b9ZxHS=JC%! zLluAdNlQfx`5Ivl!Or$j{16DMlRz;nJ2sD-#4aD5{4lz3L_Ea=2U_7lp2-ea`Lk!B zd%zf%M*d>3{Fcct=tF#(ms46(tGRTcwnKoqsID`ex_`~DAN=p`U6B(3MrS=g1pQL#mD4Qt&zF6iS}IgM9ay3A~$!n{53Q*!@7PF0R5p%UH?*{ zvdDSwWEPGR-SxWMlECG}6E`+vM8tmyEls_H6?~PgDcwdayKWoerR$J_@LO@x7j4)j zm0Ib+!aW&#;Pkv_Ge~?8e<${vJi1!Y`yW4Ana9tl+LJN{oAud@HuzO4SYm&~NMM_@ z9i;JoZ9;o9+KxKo{Jg1tFJfKA;l%#0OfLNvq53m;;WML{#Y_m+MPY;yboXhx%3NF@ z1v9)1;~(*Ac9_xbBZW5eaiGQzomok~4pZIk0!wlWs9t05Qd3sF@tMfptXYSbc_7Tf zAmWx#|c>M>6#yc^T}vQT}xz#o%r3llu>yMlCa zsq3SbNaCLAPVUOe79z9sa${&0_taJQzeWO=s+7-czz-Au)sgVjDpE(^;7F@+X;Ou-})=Xv7)a$pVEtUMz)2>1ccD7`tMdAd<#P=TIl5uPkjb^&ur0{=D(qE z{{27p)P_BFv=>ngO*ro2F$+uv>uxSxY-}WPSU2At`1M$}Bt;!V=I!^5OlL!ieYsp5 ziM8(3smgF|)p;R8tr#3thIW1spQn{&o?i#Eswq5}nK-QKzVQqqs!2{?*ve$%3hd-b znCTbyiLr1fESJ38NBpLITIy2Qn)S(nOYwhud>+X`(9X+^}Z4PZK4(8Q1k6UaDQI_ zJebWX1)Ua*zSR?0A6>O3Y(@U`Zp;^j>xY%BtUntBXPW`#S|#Z>PzYhG`=c# z17|(25L_>E*uK|~ZnvANpk2mO;~HgifAMpMc3TNp1F4IWC7d*kyWhBIh6e-diN4i? 
z1vDCTa)1H2`LO+MXxRnZ;D?f_K;t8eK;j$Gv&XYSUsOE96tJJ|G?5Neer}V=kx%gC zpnqaX*g(H?&jk*vG=Slu1A#kghvsR_Z6KZp>sH7Nu;B9Yr&_EwU)UC7AanDtrq)bEMf*C5GkZhz3Y=E6wSc@~Bxs}t zU~Jg6P|e&c(hF)XXAnG+=;WnHgLwt>+g+|Xna6GZ!dN)#jeMwwS2GZ9zpds|J?~x} z1;2Dggbs`{uQ|oto{@^J#*RaIeFlKp)tyeP_dwHA@Ws?a(ym{?zK<+a(>TCoBaP~Z z@>ZrJ2Gx4#=R3dkKgrmb3H13|G8Q0x zOJc&uu|1Q>v-T{c`zNZCwA=KRw8$B(UScK7oO7{{3u(u#oIwBUnjXA!P2T_rKV_)VQdRFL^HO#b#4{XH%d_-I|AK^Y>w8 zz$4g#y~4Xne)`iLO+K8jWO5CqrQ$pg!DvER2p^rhY3f?7$%N1YM1Y@_)&PHvgGy-O zdUb1QM}Wge288Qo)FP66O+!r7Y!RLwi8mjc$5?l3y!U}TQCs6Zul~h$9yao~%qxXA zNoG^SeM_L`vl^g4Vp7dR>xq3Qni#>pnBZW0*MqwlSfI4|1drHIcud4eh_!n#iGYqLxM>ok}^sg0vs;W+B0P z=c|Sf~-tN(Qh-xb>B~2mG6wY znC{4O>^w+i#P!h=vR$D3X>sGmojE93=?r;+3SJ2gwDIAxJhNk-JhX}DaN_1hzhMog z;|yJHmuPs-m>Qdmz0*9*VJ80(gOBh{Je};wM&J`jjE^7V;xdmnc#3$Q!5UC!zpd=j zd)Fj=vTI4V0tn6n?Ni=M;)=v<`G3p_PUtvxJ`lmrt&(Wp4EINhG{Jg8d%f8+CoNh` zWv9kC6$zG%zM>-#zym>1jUrQfnyXGmGU{Xhpkw^(8I4Qx_8Ge;kv|P%6}%FzdwzLy zFhBHkxdY#G)Vv1g`7XJU!SxdNgf8d@Jtu$xGDq zW^mw#ehEUz-Z^3unX{t8Y4j;jvWX)n%e_Nl=@PhG)W9Ze6!E;mQ~tGyK~XGrku8tJ zjvECYOwJ$8E;hVsI)9?n67pAIdq?Crzo$kRgF7<`TD)|kZM}P)B@{tQ%rPS8<9%pl z6>wZOS#q0PTXHde%A-xN#wN!2{_Fuw^Nc6a&J<`9-)D2SQ_&h3((AiOwwH0dp3Ov< zk^czO3A#)D+M%aKF_S>9TekOi-hj`vV&Oh32cC`9Lgruz3=YB8R?x62q9e!2bWUzv zg(zRj81?LNpfiGdt86v@5pR_%bg^;n9{zpH@4Mv>+LJaaypLMS77Cd7jW|0UdCI@PB&@yS0;9sNsm z=l47N(r!P&n_gX1REjpTcEjw%FTECkc>0`HGZ%AAN^pF%5UbUAzP`)rX{~4_g7Mv(+>0=GOtd=7{wq>SQVGSf*ORq(lSdUWy4S{!}5c8@w?kdr@m-(y^8}> zY4d&Nay5I+Myz4FRmVUbf|yb^%lFuJufN~1UBH($(_hND<@5` zw{!}LqZ8_ez@25CsN{57EqTZcc2w&%I+u(f^o7%WP%^toB8SnuSex=h5epHnlpAJB~-(u@eH=|ySJ&#;SchU;`(O>9z9W%=mnB>so|yGf`y)BE1aFL zjBxMLg}(0er-PN)wcbu*$%s?qH&GXV8O!xbFCsu!z$Fn&>5o}y#TqERPCf#JN;z>C z*Ou(eC?;4%D@I%g(TL%?fJxP?zSdYh^2Eu zbDD+Y^}3%G1TzCz?zey4r1a-Q$C|H=N%+yDfHzthGMuw0i>my`T$)t%wCYkLUrt`7 zVLY@!S+Syx# z5yDtMrw)jEQp=Ty`jpkvCPFR1_(X};hXhfoNS{-NRyPf}G@q)9w-u}eJrGzN|IH`vf=~DHGG3P99h6WO9cE9=R8ho7d;AWV zbib|-Sz>OCQ$2lti-%qQ6^dN_XYb=%X2_VOMs3Z#e!upP+O{wXFJ(Jt$E_9#ym654 zSC{Li{-7IflmR}AxTR7ojSqAqmg;Hl$*U11ds7xhcDL~lxNu8_KM!{C5dj-Qc4{l# zu8%w6`wQ-ft8SqN{@@Y88}iI7&uX5!xc8?1@SIpo>$Gaz{ulzh?ri= zq{0j%VfFT{%a%J|NVIB?*;YuO6k;5dOfMnRy+*Y#7#rW!}EbcgWDqF zEu_2c5#40uB5C3bmdRrxv(+x;yN90dVd8&5*Fao67}QCqO3W2O7LmSf z`KP*Dpxnn^c~&yNQblq)$0CNRx&tH^hue5(%=?ZGu$B1NOgQrrt{Mqq)#-_dKor6g zNZ%^ps^%1Pgwd8Ecgm9?7EXK^fB(LB8>`x5%=-Lerzc&D6TN!6jQ8gNAkG5P$VM#L!Y4@B7hDT6B+MsV{(}= z8SlI3kAD9IqinNY4Q!4}rxBzMoh(Lpmr`a7rUh%-t^`0Ry4k`g%t@zR{i`blddq%9>zMW}326a-33vL|`v|H}6Pwp6E z&ZIvv!4n#ErQ^dgcTiFmORYZwc0p-OE?^Y3c%x*E%Pd3A8AcElvwdFq7anr(WehX$ zP5E10_Tg3?@h@jUO-$+H|Fb$~hP8y$&;KBr(Voo=+gSxqQ{#PKoh+3>%i+~R?EKJ~GYeic!?#9rpax2_{f+|4^q{xN+2{a}`1hI~^5Wz~yRp|3^m_OF z|Hlh3g9NaD8q8;x0zQp>Qbi3}%Hx^dRpJ5o$24lUaL*>jXg%zz@DjaVfm7xa4P&-h z@xMV&UQhuTkC$_E2h|R4r#KV}criVN>uKlnlKvR=uLkB&>I)RTVY}+_)#{)x07L_P z0jZFL->;BNYvI@bjJDkZCCYqY$Jq7llK~Na&9>f6>%RuK`;heHqK~Z86vV>1?kqBE zxC-;hN{u&D;d>I<;RdApg=~voew?IITDSeIX=Dzg)EP;g`GwK#GSl*%@HAqGEIM=# z66DbsWi^cuk{ZTQKFJu^7_ejNDQCX6KDa&b7SI8qQBPQF25AU5$L9=t()F_sceE-# zvOEm!dy(p!Sk@I%;}F`eZ0pUF5>P4h-QtmN%|G$mlZn_cJ@N-1J@4moTwxM2De97? zNcM%FO}uZ#vcQdKQx=qm4z&H|g#H=k4&c-dP3>92(S3cBT>OSOlCk5C&6S#`EjpD! 
zo8x6rghi#qxpT+!Hw^o1(|fgM1hw7cka_|XOtmCVF3DvDbX25wYeaeh&ad{z!zUPm z{A;THU0~f%yOMcrKPu<^W_?^W=N&P_Q2N$v_Q9oGBD%%?3^CPbcWEzJF2%fc?A9`K z10};xbkOg(%g89N@DZ{&VmKYWAv-4Kn7?7;T5q^PC9}kpDVi2}?P&hvlF=U6%nq_) zZ7LZa9{E0L^O_OX$i{hNAA8ekk;UO~zUcx2=1O@Mf9ewd8JNvz*YozgRH>jTQqsqD zx=Q%f;jS4bpzLDL{h;vN{AG?ohD6iS2hcCMy*Ix?c7M6YExRf#n@^%%LjIfJv4w_) zw)$a@&ehJMi!UBzH-D|q%N33Sh5$}?LP@{R-a$clHmFcDL2;@vN#=TYO#}C-eS{6> z1vzV=aFdzBQp4U89UEOKx(#?CsS7Ho(%`3MF^^jWyI(kTKUnv_P__DFM|Lgb*%?oxuTK4dRqzN?hc$kv1EoOgPb^?LYrkN|{f%h&Zb@$8#oO+KMJNP%1U|Q27qXJ? zDZgIq;_=7F^G(bkg><2{bXt0kpT(b8?eS=4>RV7@NU8-Mnd!THF|lBx6@);hE2jLg z2tZmVBt$2~mbtgbjhh9b92q2771QCY<0W3!V5!jJ_VhZHqi2UiiPEOBr0L4fY`cEzccUTESeiEz*W0rg6vP zX@b2$a^M6JPZYJlnhLkGPh}}o5p_BH1OsC#s&uoK=`Jf+4>7k!TnEb0bjB4DXhDPV zh?6RP`3~||S{`z&a;(NU4j7OA0~=cQ;#uTb| z&Olqid46Q*F(Py-4mqhZ6~zg|KEZro^1&w!IZUjw?(wy+_G*nC)cgOCXSo55tO;_-bU;hGwOY$3+7e6n>N<(&+3UlO~NmX@Ar>O=FX1ia>jmyB!0s@1wexS!t!1e&TJ9gnhp$RFRTB{S>HHu$ zb#Nw~i38x*W9G{fm{|q|gn| z*y+KQdhmBUF&yRan`5GG|4TwC@;(TQa zG>16R1V~@4K54&T?X56n6@JYf^`6K8VgZ5YBi~hPu(y@vhQ5EPS4ttM zToDo!5$ZxFAmun=b5^A+LdAFxQ2M1ySY8R#MUSO}eZT`+ zIJxCO&(M~}|6YQ5P=QhUz1FHOa>EVWu@xOAtaKirGK z1Z}!Ga>(kltWD;9-AFu4?%?G}r8QW5)4{A;Y8+~#`$&}wss6> zGAB_Mi8yTWyU}sdg#$k1=TT=)*dA-0Jym(wY&uQAp#TfK!00+Qifv_;FNT!lyjnI; zPS1Os<|SWVT_{~WlAw)C1&p4s9zJ_hb^SxEQX!3sP{=3aQW~Q61PyPhw?UmkPX|j= zDeoDjl?x>oNr;H7i}rwnyQ8kZrK9ohT*^th4GD3%K!-lEB8BrOi#I>*qiOaMg>}EO zl(BDpK7|EkOW+kZ=PP%gf9NfH8-J}sZYSfwRH?l%O3?EkAj{d`_4=^usyM&azkU-f z>Led4>1;aQRV4Edc)}l0Fsd&0?9RsV*5oQq#?ilX9N|=@dnV>xdjY5DiMqFmQ_h5e z(44U;f?JG8IfZALDY{_n9VN}*`6`P$&GQ2Gf30{#SmQAQF7coII_#xjkH_J;?0VN5xiSnR`gcUO zzFCQibm;5))@U_)^l~@Q7Y6w_47}_ZnB&i;YiNm5ZZ{50KUwPer= zdS(L-?Ls)r>9sqZrm{WM!5*x3w~9!AukM^ZEk;3Rqq{v#J~FvJsULp^ToW68s$!vL z@LD!kaK2Ii!L}UAKLU1SOayePEIj%O=8M~o>0Hf&x^XT{v`w__n%l~JhdQ^?iP^jFB* zadvf12bft;+j{{Yl?LtC#+9vHpktg&6|)Z7{6R zChsjWSTZI0?X)jv4R5D@?Y1XWOlL!-xD2C}v%#WQlp>GXZsZMKM6vRzEwBxz#|rVO zg|Ke(0a;fp)FCC0k(}^HIMfPpHcj7FnaOEjEAG?1QQAo05Yv>6(-S+x9BTTQDKh_k z3LiEApq!eFEMZhFcyu))(D!#1Q>l8eeO2?9lwf6dbsgm*AHXDF;4ZW}xSVtg>PTzU zM_cj)$Z^0bN-Br{fl(_I6GefYUQ-mw{J==oObFQ&$GJVU?B04sQtCIiRQ(vQHWGjH zvkERd#}6vb`BFD^4O&(|%VFCvK1@G7!U9iyKF*IHuW@CkwwZ$S^9qWNWlwnqvkQ%r=st{{6f5OHlCMsDtzVf}~tq`hcqsm|@m4P>Gy2ar1I5pfJH_f@ymm z0-Zi!Qf>+mVr}J-8cW>T3piM3RaJHtx6OILd)Dv65B|gwx+_90WV15=YHeVE z`BX)YpX}p6m?K(dNULYTBU8k~^xb#P$uWHS7iH2VobjAnz`rL}bFo!|m#)2|HITih zPJAR}?@X(Noiqn=`jj?sjJ;-qHe3)7e?Nb>6*OegH|6TT@mr+q@;7XfF5r%GF%fby z2u~TbU_q67Obf&_AF5tTB~9fU${kyq^VP8DS&YYnsq?3uWU~!^e`0N~iR}Y#SNRJ- zUfK7u z@{5Skh{ta8dD6<=wGoB3d8n$sH_PoFZWeTgMCLd5me`jyFQP!-cPHGp z9UEL`g+}N7wCCKVKDWogl*+O`> zC0Hm=HkHstY)BXxvTop!c|%73g%sCe*ikiN@T+E-Q(vJV%eZhgN_V9RP8{no3eUN* z#M8A?3_-G9Y6L1ve>woSd_jfFB^XfXHIBC*7;~Tq%0#> zq6O^(fR59;F1_M8@!SnpUlPi}@eHS@_0@EOS!Z>xk5FZ3K+-5T!zOa&vY5%vVuk2- z@Z+oQ19)3#3*dmqc$X*hY^k3(7I!Y+E#inWpxzoACW`K3OIj2w2Ut1e2eqQ?}l=kn;4eTrDr^SHlCo}=$nvOU)ZmDHFqomY{6c_H~p zTT&@kINklec_1;scWN>VA$fOD;9dgfw4xQo``x;Vss8x z)hL4b0icNwN1EssPbg;{S?;LsbX+i->e{BFJ7_PF0+B15wIiy_G};{X#IRiU^M{z! zQ3Ua&y#>5hJs6Kygu1v28nIUaI%ll@&R#x8tCwHUi`0*dP)#Ub>aA?6rjE9tHcF3u z%K2l8>+C&Lk>mX5o=m(6p+o#jhb%D>X{MHXwzR2T{HWbY9BqIcJqTsq!A)0+YK9W* z{^3|4No?3|9LqL|P#onIci7k&J!p5j-vf+B{1tEiM(qvI{;2)xx#E(Ji=rIFt(P#0 z&})wm($$!`Z8}22Y99K4)}hYH=P?Zi@!-Jp@SECJ-$=W%$KW@`sAni~DI1!>MPT%b z`8~+aAHJb{T3uAx2em6v&cUHY9X0+W#`59 z?AMD<$T&!ie0E}7G|rW^oxlqX*~n%#)A#Ov-z<`H7v1}b)y7RA~? 
z@2yrIbuV&}1AhT-4@EB`u;vWnP!jzXzqsVlB7^P;JS4pxxT`>`iWOaIu)agiFcLGa z_R~nV!|zX~44{8K=_}u_HG|{o0fY4iTgTg&lzJt=+NyBViwHriR`-6`!kaDuOm9DDLR5(!yWj!d-m^LZ$)kL6UBp@kSyesNUjzL7Mw; z?IC37D1RV2QTvmWdXaGztnV*x8_h7E2(x9b(M_?!l_s{L-l;ES%N zX)M_iZ%f~d$PexRWJB|LRALgImqE0yyb^{o7oEf}hjsL$IwKPb5MpvoLyNr^ztl!8 zI+3}Q{2tfje_`LOD9j~(^{l7_#wl-oBrWeqBIC&~nfcC8-*JsU>U+FI|e zJT?X(Yj%cBNyb#{D_GX;Ch48>hQ{x2T{4q*e7yML%W<=mF&Vp^@9h}4S$p#*`(ma1 zPGaU1S26tu`LUu$7&sMSYut5fct@M#3x!>N8#xMP!S)|Kv2k|#2Z;F}^m39FUhtVy z2$)3YD{}tNn~od)16@3sK2kuGJ^L=32#+$I9erDr5ErvT*XU*>o^m}KAz&=V5>Hqp zIh!aadQoW@qz?}pcD3gYeNd807&vcPy+S2CknfZS z?ekvxAEpdbaKEuO(&bszN&gicOEp{*=%Je(*oAWy1_gH4x|WIq9(#U2XqpuEx%^|w z&aa4I4Ea1Utbt-x{doYxYuT1E2bPi5$mCQ-(EK3F$#&m;$AuRj;xcg@;acoE2#$cN4BJ1drgNQYGaHc zG1n?apt{%@vNfN0^Qvz&e!zjw+GGLb7t2$heDi7HB!{7z;MSqD4y9d)ZON{y>bD&V zDUFiGrbTNP`7+)-p`mpx;1~1b6L&s8S4%ousk9eR0;Aqt|a)pcOJ7}kd)7w-tcKEmB{^?AE+G9 zamm?FZy~t6C~fC0IjQ26>cX1Yq_L9t`9WHjs%zRsDdTy<>{?%i`MHB41Hjs+xIgW1 zCzXFrU|cJt{jUnc)c!?=baG;(FaJH(j-L;rfsD$ne+f1a@m*tL`jbQ|~Gt0Q!B9l2Krd&L@z zUb%r{mltZD`QK7};{Qj8{%bOFr^VUZ+dKLaa_)7M+0wi#UC$USr#o4hvZrfc_UASUu-hldsv)Wu0n;d>H(ajGmA z=qKwkFZ3hfLxR|@0eE?CkyEy|)v(OQo9xH%*?6=gOT^R5`=FfkRKH4lz1;X#n6GS- ze!`2SMjGols>bMpqSmm_31UV0!|K=)iy&&}Um>H1&AG{E_R;fd+5kv1Ik`(64C@pUyw%iINudK z5;bo3cEA(6U(_W>^x{X&=x^;&pgJH|!jC+eE}}7wV^lfG5z2)XExS(z+jRhmB3G=f@PuJ)sU$#L z?4j#J_;h@NTF;%i0Cs8U(@de6kszn81{5u2^?1Dgv*x?!%YtK46tSDrnH}}h&qGo- zGj|~gL&pGPzymfK{->poUz`o^AA3|*|5309>7OptK-=|BNrMU9?F1vZ0clr0ZNM=2 zk#i-WoOtVouAluSTYm1>b3RtT%omzh9r2Wr{D{Xt$Tg}+j3qtwSz_E02!PbzUAV-F zGyGFFL~MFVZFz8Cvb`Vk;2%JrjbS1N(80uT47!*;)MCV`Ul zTY~8f)~i#uzDgj(Fbp(%({6iD!Gyl{^;&cDO1rp4^&2P@3P38wE}_`QN1y&d?C*fE zFh?cbf>AQU=`nkG~(1CF~i;Eh$UQAmQ9K8mvqwb{1No4EW~z$otX_OLjyp9d~VJqEMPuu!T&3 z{@izkkj#22$EnC%HwkSl6xUwfukAextSZ%Zsz3i--c`8*%orf_d0ni=^xEkh8l0JU z6(srWr=Q6?kO=SG!fJ$Hi+e_rBO8x1GfU<3`C?LPTN}5oG7yyD_`3u;^4t>5Emddo z!3{dlGSZ@um#+Km6?{S>4+Q;T($gFWy1Kkv-ws(zJj%SiG*??F?{5CDf93A!X3wnh z`|4_btJ|m$mfD--TmGnkg(7NWvcD7xherc+TkGBA$*3ni(OySCF+VtzL|5Wi2Ff>i zSZ zugAXc1ezRBC~=Pk2rj0M3DO~mG8@CKwXuV@TRX3_z@I@(q;drt4C`?Ad%q_Tkde3t z6kQTU#sQl7FO%jsmEyvY53zp1a@IwYuNw#hFP3hqNe(1=D3X*V*0qJHg|krlA()NP zzs$C|!M{J4^j!tI2!p=IwXz6Yg7O+oatKwe?@YZ9FtM}9*Xtk`0X4t4l?1L(66?k8 zL{Q)Wp0bq4pFWjwIpLx)04eI}hvypKp28O@gR|q$F>9mY<;R%wV4q&m*z?ut#a|_l zkF@5nTQHa7{D5^e#rNe80=y(;-zA&eza#RjlOQs+Z6aa@RkU-JH7a4@j{rG7Sj3KW?@jJ-Jq=g zGo$sqc9vgce^Q3_TvzViUx9_AM4|Fd^W85{`)TlGJV2(LnK2ASTZqmLa3?>Hz4o|= z43OXjv!SQ&Y_R>#Q{{!jLG3nvVgf~}@MoUyt^*h#f}pI=1IR^D|~Ovqe2g}YQdk7Z9r==h1UI!??m z>xYMgc!X(6AHrDqpOE-5y--kAc>$oG_IahY>+`GG+`N;&xtdjnznPUxUO_=mPXQ0k z#+D6FiPb)bIbX+nt#4VqpztLB6H=Ix7Iao)cKWko!pu6Tse0u>y&JyS> zLg9uD1HWqVF=htl0J38`cq;x38{+V@+jSPY+2_x9y4a@tm~UKr_gIDeO%@rhWKIOP z6g*R>l{}K8cvQe%A~ji_pe<(e)CwXmT@fc%zHpDV2e_C6mL^>9_@W!Y17$l(K}ou1 zqo!@d?-SO~gxh!c`;Z+I=*D-;aJMS{`ob*u;8g^_?=s7V1|s*3#=M4|2tpB!eht|x zTYYc`npBRX7u5eTNa#tQ6W^oi3gk%)C4AV0C#0-Bn7++LMN}bT=;}yY8@yWO zVlw|&Ll1togYN&vnhWUKMJTMZ@tnZ&9xm^s)L)1R4%%$IE4bW-z&JQ|T`kGNBsV<% zp!$a_Ac)gX`e(m&it~JNfEf?SuF$&X!|uvLa%*GI2q&wF-h<;m3fFV3tI6uHFun+x zTNQzAZ{A@WJaItU8wX~SN**Vy%E>{y^SG7k|rod-_`E<=i?;ptee_^x9V=uPtvA$B*ItEhGyUInQZa=3haw+Sa1c zrc@1Z+z+HUda@5Gj3?|Pr6byefpad|zlQLsek`h*89frB_FBJ2rzN6SN86RKO}FWV zO%U95rvek_?vbMa27NFaR3O_Jb!58(zs=_A$}~1V9{6k8noB`D|Kx34QAqC`YP?t& zu`r4LiiV$;p~lYVYte)Fl z_};oZcq@HMM^LTv=-T0c+kD#ZMMqXf#6YO>T~?uRwTT{DsIpLpHUqxcBZ(ZK;xw0V{e zZ1#T9YZBu^XodQndr_sH-~|r%f8KWRm9>e1(J% z!MM2j7McaRFz>=`uUg&}T{nZh&tBY)arh$^vE+SbX99s&#;(li^r*UTcsKv4yrHA@ zd4_$-%YVm1xq7UZE+JQIZ|ps5665+(;_R1sjomn!opdx?3KpNwTd5Jy4R*Drb#dI^ 
z^Y#)<&bnUTn9!?014b-iFI$9X_Rn*D2etXtF-^Tk?x|8!d0?{IOOA8JM(rLW5B@Gb763RIaVDO@zkNGsrya?*Gmm zi|(YXKBu}cYjaktGVX@p-Zpeqos3vgos1X7QqGaIQxcLtbzI)|?K)URW>st5*zXrM zJD@g4Dz&y!X9|6ev`Gw(NhJ}I?~IvMMb29UObn7%0xFLqxb{sPt%3|KwetG!RsebZ ze9glDX?{a2IDgIGT3kSdY*ATBSminIKAmK_h=mx2e|1KArT71?R4AIz!eg6sjHAU` zZ;+J9ps;&mC7o$V7BQKOiJ;JV5{^&Ib1rCVByjo1_ze{^mo?m`W`Z?F{ws(a?i$0t z`AnbgzS_HRRy8QO9c!^c#Fp>yYh|E*4hHXTS=G??Zdl0 znnQ67w3`x%Fed;^uZGmFO&US4`WfXItou3bttwwX(t;II;O9hhc$u=88eEU#L2r;`zpgzqCw_F zxqno@H=F;)YrqtnRErT1P9A(DdSVaJ5F^-`dA2RJJD$j~ zfe7sl-n*z7;brfPmO;lQe@d|FcOOiDwE)@s^nPe%F0++3P4ZMSj~tzF>vd_yZD%ze zo|sU;Ma=KibzOD5fAaPB$*tfkeDHrh-Tx}kLljWRZ7$67qF^@tNNFhZx_s9%>hUhl z)X+?Vx4(Itrvsgcy*gD?=kML~%oV}8lEF82Dv}wDN?2ZIDlNbkg_d7(hvF`$T+WYy z?BL9_&o%6W&+N_KmdgkiORK8zc~tu~8|4_y!Cn)--4v^d<&q`eiRB%D+I7)u`G)b= z#%gU*8fJ@d+ZihYuG{tXn9~v6qt@%+nbe|RXr1yO6`~MUru%O?YwV~4=B#V0b}Hny z*>EI_X$=b5TxoVL^Ii;IS&DOoT>oa#GHu`_N%yl126b`l!hK_wm>g3#M(7#^%fsM- zQ^lNq3`LVt?^%PXlF5x6R4nYW&9>S?kROrUBpTB@EA9Qpo{A^0^4Fa&%&PtO-VMF) zL9NUKHr&L3!C)U@nY9#?*~Q=1I;OR$78J8oZ>A(pD$a@)8Gbg~j&efM&ZkGeE&VKr zkmjcSaqc;5P?M-6#@vQuJz6omn?*64b@yA6$pwxJI@U$^(iiGAT$is>bkUR~{Uer! ziZr)l7i)Yhk%s6->!=(nbp1pFfQw&zd@u?! zjIF>{j*xOx!mU4wWC$J(@_njWbQ08~OW2(-_9gStRg%Q3H<*m0R?bX*p7HH9>8{-q z^!#O7z*YFRCrT%nfM!LoYtjb7ckHwX7ZbQmzipr}9<0^JzA8W)mZAGE&zQDNTLWF@ zyvp}RYXbJHyZ&wo%KZ&$1au$IYRx{UZML$3CjPJ1ge3GZ2+EX?ckIkZ`!9L@sM&fn zDRXOE?Kf?#VHI?q9ItQRS;YA`Ai=5Gd-lH?RbFXTSl;G)+`v}Kqa=Q3sb}uV8tv(V zvgaGg3X>t0G(#*QfpkLl_NHC-9v3~`K_64Qh7BXw^)#qv{>IOyxHH=k*|X{q=_$v( zB225VFzy7}yQlaTE=tSigJj_$8RJ$=B}@k@Upb1)j{$b1bLz@bi%Nx@IdPx3b`@Ry zEgb~y5otYy5O$C)Vc$wRqlDOSC#59Y5}=UK%;onZh2(oGCiB|z@*r6ND%iNvJdt~g zCM02MzaooG`DBz#Vy;78`?4yLoZf8#Q~vkj=)lj=`)0CyH*R z!*ZcS8^KzP4U3#lq>a_&$mFW=ke@Zwl};Q{=!1HNJ3?kCRtLz;dli@?FQ7XP`Udze zo9sRreT}}S=K>)d+#TjMvavP5`~KWL8!5M#AXeLo0Q8Vvd^w#EH&nDCxS!|U|KdQr z2yB9Nsd2$KB|WSQuF&O+dx$5in?j&f+7_H(836XkA8*F*xJwF~Lr2BE9fa_K^{Vt% z9@PkicArL=v=j@6nChy48CV1RvHotd?sdU|(I#@c^<~iM*QC^4lj8*wN)m7SSGuII z1;!1SIYxS3K+%Ep37#lJAUbWBGSie;p!WEwucdA19}2L8RmaY%=hwCa9;c$yv+n)) zFF{}ak9eIR3H^QR@zV5*h$hJd%D?+KdD3rEm1X0{N3F46-#?6fE-$cJzU}hOx~Rm@ zU|C^66!U0a#EirUAVW#B_ zM>-qj>UhKn#~5$9@Px;{9{3gv2K&T4AE+{X`m{csVAIqYC|40}fb1|=CIL-;3M&v8 zqWLhUhU}c@l&6@gpWHcS(M21~Px6eQk1{XV(nCK=131^ML-HQAMS~f$Q_3%(GSFro zPtmrtzI;!tO%LFUqs%!y(M}8)#HE|te zh?7eoi1F)Arg=uLfh`$hb;PcNTV%8AhM@@IV;V0jFIcsNZdo#_GOLmf=njw%tm<~(W z;7(chwO_uIw+Ue_HACqddOg!HTP-GAY7K?8(m8q z?^C7Gd`Ma(taB4{@-9~F&y_;bwzqQ;N~_R88}^F?A4A*aibbb>sx%s7pg!SdIcYgx zx3W>E9F&dzoNBG!%Pjz`RpXF8xv4_lxDy~SWH+0+b8PC;_HQPA*}vM){J@Y1mv@Dr z`OVD>>!&D2|Av!rTVMR30bib9Uy7{OTsdWc1f@L+(6ZN6r0Lh?`%PV6UhVx)yOme6 z=>#3Rbz?j)ki4zLYt3DzAB;!wLP{MV2vP5` zJzpOH@#^H>{X3wInW7o6`z~`TMO}_GH0ms+P~9?&?ZD%i84IB}4lz6MNA9~g+6QcL z(kfrK4FSXb(%u%e=jP0K9HCP|V@j0ux0|mF7);wn15$onu*SfX!1!cH?)OSM3=HHF zvF5aMFKhxhKDRr|#keF8nYvN{dDYom0XsshH3$S8$Xm*XW7s_yCge+QWGOO>z_ zn659-%~kPiuI?~+L-9-AWT3Sx9;;$(-l_vGj)oRr_Bt&@9tUIv*aE*KE7RdWAwr5- zkkt^H--7YIpweI*%#U5y{_++KnNCyv!YFH6Y??I8tmsfZ1Wh_uSUz>Zjh*QBU|YBl zX7R`^xMgiJxEF=7?f~*Q^fKXd6Hc=L#gF948M=?qpRG+$hw@$9_@M^K%deFxRSXO) z^I|Ker-{~ct?nWnm#BG^w!Ccr7`88d56`zkSYklh&xWv#@Yc1EC68tpTC}0G`#1O0 zJ}t7=KQR$4QcEV>W%hfg%hljbf84H@ck+(QiDuj1EOq zhB-pstW#MPkPeAHAoE%k#V+q)M>z39 zgsi|O3{WvrY;hs@K-mGiG~f6e57(=fIL{8+g04#IxFI((USzs{EvgdQM)Y)j`WUuITW7?maMess6!wrrgF_x9>v61dKk z_0TY6+{4#LR zd-WZe{Tt&64Q=gKmFd)IM#6)E2s>GXbSNPlUioFf*l_oIY~$aJ>vao2<;>q0XkYNf-kye88mHjMVM4AjNE1Nb|c-@j!j1>G4 zi>)llZNQcE6uOA-FuWRLTr@Qu&AbC1Ri90AZ;{%~6UiLaO1u={GvW|Ap61=iQe)o! 
za%7-!to~5G*r8#>&}yH}&7ZadvK?FY-9MOo3{eFL+U8mwk2j0!2yX~%LZ%51|A;}g5ec^s?1TyfnSq8IdK*Zee5c0i4viI zXYY>t75l7<8R)9wP_EhrcF#iL6D$c)+%(6uljRDJ2}-O4TDuq3ETT4q(_EocH^+3$ zuF6gO--L)6zn6u851D^jFyL|Gk{EHosbz&~J=9~R~I84mQg=E3*2rgJou(RbnQ<2fJ&AM^O}5FFi$wDcV0a@VYv(hdWLR+|qIf^-H?@}S}b%@&tTR_?}k|dA@ z%DaG|7z_)1H?wjn_I)^GZ#?5!dXDEC$@BG-{m!FHw}B`*i_QE-yWHeJRYf2@7>|mh+Q7PwGyiESKA?1>ULtQz5NJCV z5mHDO^wd1LvZ#R1VOtPXbct*^qu4Isk~TJ$R2=}`$xu~|; z5q)>~1j4HYV!6)Xp%Zwr#ix0cHuTO@fu`inh|P+>UUBhzchH=o*zRhO+}Hf!J`4Jw z->gV}5@Q{QI_}F{Tm$T|8w0~NA^se{qylh(MyqPjzj(Bcj{@9t!{tDBk5OTCsolElRJY&17LVWCg}dInYL|1&n0dUzmhk>!s z4+N}^+y&Z2sH=OX7I1&CqhPr2@I9|$v6zS4lL&kh?4jdKYF>HzXNax?XG zx+B~HD+sts1vrb4f@Cs7{R*IvKq^@Fp z;6FvmkLUN`=%c0wBh>Ct*=AbSmtMSSBlbMHE=*q!w%=p0Ky|=6iK{_;96BzLAxR=+ zywn?QPUnJ?Y6S3Mp}iIynjYfT#k5ZRF>2!U_cn?S@GKFF72`aOg#Z^jL1!08t(B9K zPzSi7dbv({@+1~(OrjP@>0I7ru}~JW;68%hb@}U%CmL^@DtVFi^pM8#3zsl<2vjDt z25<)m>C$6_+(Z0q%}j!R8M@7RrT2ZNaiX<_csP$ZPYYO&t_7`iBMwstqg6;(P0J0+ z${Wm3u&*8kYE`Kuq6F=NA~GB&@17jnwprz!R@lYl_tw|&75imztf|jk!3joVY12`t0m5e8a91HNsVy&~g zQn+cp?-DQM(-?R77IGW~@3H2fqfC&xQ9d`cOQrPRV4JL8vM&**0Z8e(z20`njIy!H z|3*X9d=R$4XK~*1(`D`j_0k^>#x(pPw(9VC*1SI-W|!QK$kKZWM3#vJY(|jjS@jxi zPiwB92K0Tu4U)b&-%l%TwL?$r(A$e#yT5F5Lm~Q=ijO-sf7Wp)xH#WuVyLg3&JVlo zyuj*l;E6L@_MW5f*c_N1OSsQ+OynPD2nK7bXM$edT*BxpjD$66NqBwt7xcIGu#PA! z&8W!Cm&=1q0yic;Iy$QN$NhX_8%LMG4EW%rZF8M%LZ-=x_S_Njbqi#x<;U~+sT2== z+tp*k)GI6JGE(YNn)5F;^|UzdGr)-257Cnx80x$uFB6g8@sef*B$M~7UGyw@ ztrEM!@NVr2iEcBC9SwD)XV&$j^IJXf9SxtBrS8urfK!M7gLkyt(l@Y1BT5H0ASZ8-O&1I+barQ`LL;x~I#2~pB)gNoMGfl#J3ZxWim}B9iQ7dCmy{NR&u_{3q5Rvht zdHcyi5-JDfXGm|PvxEA7k^G_m!-g5BGgI2S5@dVz$V{hLA=6O_^Yeyn0kNIcIMi zzC|gco(Z2A%tYE+OMG*Q!{7cnXk{@Mk1WK}YUg|LnHKy;@^0V+`TNJ+*2KHdec~N7 zDG9Qm2vPk=A*+4{i>$NoVd8UTIg|_NGpB8oqmJ>r=J1AO-dzDux)8HvbNB@!_Ii@b z-yRkj?m5=V(m$ynFLqXpe(b}uWKQFmJgzvl-D9qS(UsY-C3koRKabEn4IM`045%&^ z;VDOYB6Fy}mk|A3e^pRyC8ZR^sk^Yj6-Cs@c$#j}!yd3CBAL{V00*{xNRq zW4G2je-C0hd+VSSer(_om_nm zYHr+M^PEFfU`eNfD75o~5<%AvD2ub&&sI^a(aXxku>>{4l>0S}Y_R<`ThFQ6!o-oE zh4)1L`*hb+QipI4c_BEKC^lH9VdDOUzKUd_esv7lqO~9MHr2F^)agz0dx6W6;LCWQ z9Jl%DzDh0wtS~y-;Z=PqILF$9cl+sxCE|AJIJ(hkvFC4x1i1D-97$0?C*Twqc=XvN zczsErQIfsU*lu{oB4~#7e<;xR*I&DwyMvxpsRw}(+l@zU3fV0)ZVciJk8>mNuZ&RH z|9@_%0`<+oUdxYN<~6;IOZ^O$E&M@ex5gHK{*dDcq65*X(L%aZWq9A5E?f!79KBWr zVgT;|HZp!P2223}#s|IOPr9kNcDD4lu2<=EqTJQ4012;nr=J|~#~^+6M^sT*`d|4lygGBR z;6I(&f|XSd>($5&DAtcWd_yFb43;AElO!QV?Ij?(jB^h~Xj1R{fU;L2T|YxGsnCb4 z8OJlS!U^ywX07Jg(XT|LPjqOT) zy9_L;q5u~y^JjCbt`4Zbu?5MR%{Lc^6afU3L{gwm({{vI52Y9?&G$r0r1$G!=6Pd( z7y4G&W75@VR!9w=+UOOtIigk>xt8lOWkD6_moBwsEDaJ}^!ZEvveAVEJ|69R*6{*y z8s5|cuWV&D3qG3hgzG<^h+w`G>jv|+U|i0UH%j!Jc)kz5!IMgGHZ%gm?aNvp_0n33 z>pS+74YPp$dykTQ#-AI8aCA8FFOtwF0jdE&- z_T#Ql7gYM4?KizXvcBTcze&06J#P`Ph;i_Vi-M^vBgBViQw`b8fo+OEI5EyOPs2C$nkMxLnY7Q36!xZ?py-N_l=DeM;$ z{UP7QpwJh)Cq#QPxh5)7sr%#g(+!rI!|S*mq{0;w>|}7HLg(`i1x=Z1)1vEWwwS=! 
z?amD1{H$P;W(}zr_~GutVl!-Z@M?PrImo^vqGn`Rbw=`#7K^NYJVM=5WI_em@(VC> zE*H`*&(i-I9{9Ez_2#<=GP%dIzK~~;Cm)o7>8RlLjU8WQHhgHJSgY32;Z#~+*-+6t z^~fSSMIk4*dqC`Rht4_3q`qGGE=y{(4Jo^3Gy^O>>cgS)l=SDPbkyKBA$F=WL`tsb zsK+d~Hf4u}1Iu$JNwUYjcrEsb#6IY5fbZvr?&6IqV;869`VW=oxP#@1SIc|Nejxsp z^>=OiU%ftDv7Af_9RA*Weq>AoP2GM0%5J&Ia_Oon3Rjj51U{}n=<-QcH#si4?Hj}L zVLB2c`D1cK?5=*etvrjlpmiQtGO$zZcV-O3qG&1nYjvE^ei@kB89(fl=^=*8ughrs z73_K#@2W`NXBz1=nzi$ZLEO%Sn~ALFaRGeL6Qv;9TI75+Y} z&WSIABa3_xPlL5>CFkAfiDJ0Uf~ms6tl!E2I~%3kHucs2umC0g^PV_V?nh&O((`M9 zeje&Qd}KT1yH`oxgHvEDxB$QFQTweK;!lcJZ}rjx?FtP|zHQS^LH=ykyZ)Ydksuze zTJN03-lUqlA>-iny1xnkj*#kCDtXMGxzx(u#ufd0XaA=;C)gDD*bAbILX5RpR*XWa zyT6!H^*@}Z?Js3SeE@WFtPTPwy2^=)5})5+YCZvo92pLi$Kw|Q>(LhY9ZhAF7QV^| zt&7V%BgVl&Z^j8h&qpIfTMGH0M9XZ6hCxbnV^->LuuYz1r*OU29#TbEUmd`@l zw`^?I4ZRp;7+n33UVHr!dpS8bZ|xi-$Zq?1o-mMIt(ReHvb3{GXlqD82&vgca{4F7U<0N+)RmgRi4t zg+r!{QJ!!feH8vr7t~`PGKTmec4S%&en_KtB}VJmNJyzy@`8N@?ZdvrSt^y*=gP7N zNdXuph`ld;Gtrh%~t7+BKq(sGX4jwTe>;BcQxH&@SI zm?{3|`h(lU-PJ?NF}!|S6LAD4jEcH=srZE)uiy}ccw7P4)DLlK=I@KAN;y`_6JI|e zwODh3lY12SF{7>c-Hw)oWvPYauZWw+m+{cjUaflWd^xT<2J1!RaYI@4jKQPN0KAQ2 zLhD(vgMZ$>NYD0tgUikveKViI2RZ=*b^ z`!zW~{{mB=h&Oaf#x^|<0JvMRJKIdq&nBC@yO|P&eVX1XUVG3}&st)or-y{|!=>Ck zXnlOs20s+z$IS>|yDxhVt~Z~3CQKiU2J1(goA|4gx88h13`k@Oe&j)$gxoB#xUJ%R z>66$xr7rIrB* zCyVhxUI+{R8u*gn8jl$phOGyL9}$Mq=sp|J`rO81-(i~z`P!ddN-+xv_J{40z_~q) zn#kwt;%5!le-VoUS-8bLhViDiXdIdnIzjhV^$u%HMzb>ton-b|$8tjOl!uKr0gj$K z7trGa-P%VE7QWxs`uR=vU-%IDOqG$i`-f&+aV`~=sf18pmIqBTLBuni0j{3kKddPv z*h${TNcBK09cy!lvyhA^;eMi1eWDhmZ>f5n`L!Gh*b~rL3qqO|wU*(kVw(kWT;{;% zodY!)93Ok^sBt*xN<`Tc!cD&@Z@Ra4Z1pV!^>ybe$@qELv+FV$B1}>54%CzbTX|rtzJRSdg5}olOrW}Zo0>^s^MsB@I73HUy%sc+l@#p% z`N%O4W*$_FTOYssvXC~lGcZ2m9j5~mBU9Z;5z-KPmQuyd_vJ1cCier|(drAJr+AwF zlxV3zNBq>VB;>Pe_$MnflTXv$lWk+O7n{SDQ4ycMfyZbT=*3g%^E_YF z#zxIf{8}`rrX=5Etw1V@Z~PQ(kk+o4mQ0xR4H0LWWscP#*<*fYN9;?XrGpnw@d) z^VUu6IQ0n040Q8uFbe0!9-q?@p^$h~E=5HnU_sIeOu{a~|IS0)wQD8as*@T80dEi!^keuRvoPD$CR~8mnI4C3Dke;bpIH})_RfTq`CT%7ysxRo}>zy&n+Yt#sZZFhrR5I z8FI@kHSUyrk>IsOZx|f6)`}QhSG>eFT)3^KIz88Pg}u7t<33a)SN?o61syUy_A=x+ zgh_3I3+%HIsaD)GD+A*SaFp71jnReZZC`6Lgtfw`(yG+r9|SISaToWRyZKo@eNLn8 z@a@N?NBv>WEKt}PdNe*c-{^Jz*NnIu6!GxLYyU5fWi1bEdrCuDr5R+2_;_zg*8P8tno%uHEJq94f3+EHYF>op*noZ(eA8!|#o4PT; zQl4TxYvgSR^U3sD5im0^bVv_ioktHK8F=y@SiN-#PZM&Wh>H2ENYVm3(=v>ZAtMpH!2t2;HL!b|M$-|`mFhT^nil{3HS5IRMOqOdY4h2M9DAn(8T_tOX*k{W1gVwzxW z1Fyi9LxwFQ9kghcD?>@t+qj~b`P4KMx(rTpJ!ZL^A8_2z#)|RkZTy4SU92rkULSxvzz>+Z`pZw8S}Ovjn+C6*05x@ zmSFjGg=bgLLHRw;Np?KimN8d06@|H*lZl&MCGTvhms~dJd}`iqiA$R;!Wyev#?}`1+yRidhg3JvW zn?qps6UV9rdu>{OSN6{pHM`#sTPfV}mr^%u7DOZ|H8_-sLK7xrm^Uf46SjMdy-rus zB%Pwy6TVf7KW|pM82Zz%mO1obt1r+tX2aFZgO3rSCvASCrqUADA5rH^62)8E7*ZHo ziD(CbOwaDY%P`TidRe~BS%=NCB^L(Ur&J_xF2^jHfc2}6?#if_pRF=!eMd?VLsR?* z6HJ<7*W3HGzwO<&la%@n3u@K1XCHn@6f-K6ke@8hN4Z^OD)o>OuI3ys?7lRQ>3B-@ z%*#oFLuT!!kZ}=q?RMjr-XoNroqD|Fmu|XM|4vIq9u(SWYv#*VNnLNw+$kiueQ}6b z;7UF&G2#F)i`tA!OL7k@F!sY@0IgxsK zjiv=y%qNRzTp0g5U>-RdmDCW+o??wGdM(gdpy%-)N}iFA%2A@&irBAFGTz*fSc2Y5 zZKl``U6N>I0?c)r>K=mu?L`b-Y;5SCe2pWf@amHJ!NuofCTthV_8&SCZu6l~%YtiK zrf7)afPLSFpVdpH-HH{yC5Pl#Nw^ON$$f$Fg$t_BDi-@WOstVV6O_b!KBx-0+ zH~vk{e^OXF)~ty0!+E9CENMX6G2&`1(9SB`3)!_T&EN_6KH%Rzd8_#GCx#r^vs4sN(Ci?YE%mCR|A7?(;pVAx|7AG1>EhH`18Fg zR3%?2nJcirFL!*+hH)KD2?!n6iIB#m@xWu7vXzW>S&3Jle)OV@L{@YL(l&KESS%zWpU) zv0TQ}N6@rP>_ly(06#Vbl|1MMzF*eut=_SqDpUE-xOm4A`aote|BuTLUrws4F~ur> z2&Y6YNJ)GBu3$I$*KYiO%3C!|1;7V;-259i?s${hyw~^A@`S8O{;ZWeYJ!P|V&F65 zj?a5fG!;e;KQrnD{A1@2|+JZq|PndzZpISgcq-gaW zeTB#_Q8WbFCA_{GhDrPguE;!lj?;tgj=nua%vfvCKhvilZ;TO}_G%w1*H0*3!n4yC 
zeEt*J*A>3?r{HP)&8`MHsrq^Z_VX}w&L?Qk-d&!+7Tl&4!Z&x<%+Iu9qMv!+92EB} z(l|Y<(ljI5%MCDFzR5b926I5%G#yVrQ=}9v1>bM@Rbx+Pc9Zc!^+ZJvE|u_xiybW1pqtK(riqA(PO$5_-IA5tEboMYH$P+Z%(9Bxm|G@g$)c~$b>61j?`%Lv#@pIcR{ZQ%aw<9i# z%#RfqO8aHk9g?U%(env83PSK*oJb6*mbA$1@sdzd@^Y~TzZZ^qu(PWmrnZ-Sw3F^r=C0zFqVhpu2^>Da#`NAx%676 zo06B3rX8-c#K(nS(Pi8at!GHyAdqfO3_q8f)>QVC;LbTDUED9TptE?dX2V!t5Un@o zDTpa=-c|b^H*`JCgIYP_Q9lFoWZs3`Z&=m_m4OUal_CQFO0n#-bbT0~ekaa3VCk}d zmU$&GpmlYz0UG}pkV>9X;IXHPSUxMrw=)I*nGsWUzkY1y(8ARP{|C7Ff0{$!ChJ6< z?YPxmS)@>t^_QPzF3Oig#?kQp%MV$pTbqfk^we2Lwf7zAiMmLMtB#yOrmQOy?S+_% z2<$Lqp{hqqqF!Pjk{f1(!Hz)$#bl<;$A#d_Dga5A2%@5bf33MPn4>K~?jq+mj288_ zQ%+Lm&jNKkWQ8|;sCj5;`4pENeFuRk6A-ZlLFaU!uBUg!+bgr|;IYJJ1Yzex`eAz? zwL{NX0E`ZDH^t3Y<(DP@b0eUJVc~n=6L%qY96^{mm{E09PTL_x#TA8nPspIPnH+7i z)2!*CK>2#}1IJl6Ydzz}KGudxmK7ZCF3Cvc;Bd!nsJT-MI%W$#9Fg9@qBu?UCjN$m zM`{OV4_cbX3B)h+tWrvGvhjk!kS)v2u$w=X(dv<+FduLJtV~k^+vhVSu?ci>ALX7L zMD}59Y(sT@nept7>HVtGu`_}Dk#Gjel>YUf-R`u{S86n8v7Pf>0*1t= zMb7ftiiXH?ic=n&`;AHmnU1LNqzAQv+QhnEA#Vo2yZ{6AwVdqp12b{)g@$;#z z%feoAbnGbYK7mxeR?wH438mX4XO>N-!j^uM^DU3U*GDvu8_rWTKT_DFTzZtTVesA@ zg3)dn8E!+J8ADoPQ=r$&ZT%((1z>BQNsc2!4)LdW1#Q@0X9bgPsQE$f!LicP|LH%~8?7GKE#Z%{ncc06#jO==8>V>rQPG!bDvi=XQOQ%S7vTKFZOzlmz=YewPfx^ z9=v^MuODiz=+_O&jF%qhrbroCKT+F%%-4}E#m16P$cqFDKf{!brj349@?G^UF$vKd zA~M;UJVAvXMYzJ714gb;8Eh`J*RSs%)2VsX5 zk1Co#r(yn>Qf2w8m09(&od4ha1lZ}aJ^{2MGkTw)w@qWt%VyV%Q2$-mNI2A_Bn zD>aD*vIX>RhUSAM?e?UC^=dy~9Qx_D&jf5^tY~+(5Wgimqq7}cX1v_IcBJK#PMmp9 zsDfvRn>4gGk0viP_XDCa^YdPKI(xQ7P5tToa=E2r7?eZEIWeZ0l zcS@4%CmZ-~PZ!MjIMu4!g=OEd8`d!D&MkT$RxQPR%G4l|*VmyAZzj}lHDe$x?mYuR zzdwM##J`QZrGLju8f>~Nco{ZYWNT%2Ah9Lr^!Jd{sI^_35K`VsW~l+ewlt@cmQ4x5 zOhM~bEOYFzQH8GC@(-uhn;tDTymNQ2{VO?P^r)1tgOfqxmR;3 z!PPB0y#9{heDTpuZq|q_O z=#uqBr=w>}u4KPYzZhiojx97>uwcHX&+2|En-E9h_zdPtJHzRl{-LhDznLQLaK3Om zXHQtzKn+{O)G{Se70VHXX(m||cztSjce#Q~_*5s61OGN1*3&jG*Y&@@^()!DH{V~Y zcDl-{&E8vm`rwm#j_C_6z$CWZcqh()?e-*?2$^9y7pTO}oY*zDB@k#y5_7V1qKoK_ zyp5zPRa~+nwouptJ1S8{}+xl?JISmsswBBQ03lr!k@ z@I}YXF^M0U|2V^>{c?+RPoC`q5}Q0bJO%oF?F-#;3t z4@ru7C+V1%85Hd*V{}W~6rMq}bklT_@}E(!Ltf!`<}^`R5cE*OWdQ$5lHxy_Yr+BW zl$$3&Y6h3Q7o^(+CvFrJt~DBKka`Rf&14V=3|xEtZPX5u-~$yyPd=r4wJri5c(A~k zCurYC{&+~s=;6=jTS2@0P_gzfiS zmRZzu#W1QBzMH&X!a%uJN5l9?Pw!dMxDVa&9g{2_(fOd>nZBx6^ zr0rCz!|Z3+&9?#KyHbIxhVgL)LR%KSJWeR^pm_m71u5Ww#dZ0op#Hl}$q~>olMgGn z$71{?S@Y;v-lq+t3FAr@2hUc+de>4bCE~Amoo$93YE5~!AJ3d?h-xUI`B8>HqVIve zi!mxNe>7Y)@E~dV*{9?8gRD-Wbr6H({ZFAP%XykW^!6uzfX_=kc+;O;6Zal*EJ#~X z;5_JZ2i0-2J8!ZXRd1J-3BCPN7eewY>vAQgTF4Rqg~a@?3eCm}^X--+D7(S;15yQv~E(^=g5q3)bQzWALBIZcY=_Yxf1eWzya`Kz<$6Ol`N?RxKepX zY{?0;bOR$BE+kF!4~Qu%0oq=pePZTX$A<_*iC3G@JL0;~dy<^!oe<+1r;u!8i6$(~>Uz75IW5C3Rp#6NE zgrk*@#&ZTpsZM~P*zAp~2OK=$ZZzJ()Wq_~dY5YriRq5?Mb9H!h`;MXlT{}(?%t1H zfA5{OXqwr9WQH^Cqaj}VNaxyqt#Oox9 z`qCMfN(B@)n2l-|Yvd9ewWVAY8U3bl|4nsm0aaQReqa0D!zdJQKIs;x)P(uBGeBCe zZWjsZQ-s~Z!rfi!F9C=03L@QV*LXzRSyq_p4DX7ElCku>vue%t&pYyKJ4uImw20s* zS7Qk%0x^8WQ2Kgmh5FE8i&4^4JzGG?V<-yoO-D?q0sCVaZU428-flI20=b}aUeI0X z<5BBNTVO2X(kWuhlKYtLM|H0D7jmVN!BSoLA{f;jX4z9)jeq2@|{cSt4tQe~np;4-69 z%=eHMc+{kLy0*>asay$a9ZZQ;nss0{C(yFMC6IIXe8VP({H ziAs3U-RCVGss;Z;vIz>3=z_D&VW;U5EiBfc)*1mp!RkS@^D4|<=Q1s`U<&H0mO}aj zj>(MR*|>#?!L>j2*!4~k>V)$_vjA(cgj1`yat7s~fgtFh#YSSn*VNrBKc)3LBsibX zT(!ucpX*_noR?(j=QXIbjocS+W&7_-5gY9TuPxM1$GH1zY;$$e=8?00IajI3->|U@ zOBc4qS_HcgERRzo|4#{#BLjLlO`1xo=044(RVznEEyh~hMBnHsPROq-B@wIB6}bjI zB^HApLhMmx@Q5pb{j{)tDcShYdF;-3#Tk4+5BNX@N19y~CuZdxj*8UjZcN4A z9n=CIEPs$XP~-VTpj&#XI!9F(hA+1aUK{=bObl; zYs@BaJQTa8x^$YzxrQZ*G01yn6^N7hQ@F|?pD0K$@JH|k;o4V16u0v4aTvbnd4wny_#>s~_ovq)e%yS+dR53y 
zl`K#j(~M(2(odcuuJnopQjo6@6v!MEOP4ojDw!ziCf1q>9LY$CQ18`%kas1)K>>lT zWrh<7y?G!dYTB~$>vhp}L+=QQm^b3lFJ{>ifFe5vVgxgc_W+tPzovkUWnk{X35s*$ z6M_@Phl|J!pepLT+GrM!mZbNhPsaUYwQN6+l{V$e?8DHrVH9CnuCx_)+M5>8Z2434 zj#W^KeJHdbcaH^21`H1;Z}Np=U5h>2+so^(5&FLWHf@< zv++xm`mfD5-hk<%YHI#V3lMi1U%?j?0bRwUjweQ#i|I_bz30;ph( zY!IFWMhs>*o*TBqF2qru8u*;#Z~c4giYoh+3{FiPB=RQ}treXbZobbTfg6~f_8>nt zkuXc<=&W5=m6X-uz5Y~jv&S}TaPyubSka929`F(RJw!h8i!~0Vi|gG>+^k!(&#Obd+{??QPLKVxL7@TD#C7QVhM^muna-C;#~xKcM|ebq=sR28*JkLwW6 zjTR>GX5)+E^>HbOQ-}UG=lFhQvESRSl@;JZQe)Br3)^xFgU08H1m@`uGTWvWg5%C_y|b%RfZYl1H>BO1~3BH z!1YFWyt)jQ6uYt>OJAQ_dOy}LWA!4SadT7&+1cu^wjvzW07unm9`XRCTty?xpxv{X zEWx(c4;qYRDwjrCyl!orXo@A{Au@|6X~c1pdrzCx67zxeSfmg^LEPl=`8lfQ??R^J zFqiFdt;?*2=|K6gSQ1wA0Ut2cA3MHC_H~9*Qhw{5_kj}QdhyvJc9k&QDrZ5BFy!)z z2X|)UypfCo{Gtxn(2Ns#sKL<=@}2pnWV=9l9Q5E%DYb-Oh76A45zR{} z;A}^?KZm&ld#*_>dxnTg%$btbJ|v84e+p^tK`g*Co{qo?@f7AAf4o=CXM#%F4YQHb zr}%9~XgLgha8n(@--QKPWHzIPq};;`*H?TgQaV`s0&_GI8FwchyHI4ge(q}1A6 z5Qp!OxqG&18)oL>Z(^J4cRAy4-NuQowD(o~N&=3aHi$l6OA$CiIzux*-No!{SB->f z5gfSL#W8s0A&9cTZ8d7K59T!Y20Pbay1msl%Cv5cOQ^kS`l?RXG2W2$tn=YJSDSGc z+1gvB8TVkpID_MWypc!U*3_9FMsyX6PjU3&Rd2XoD0r;D(z_=Zk3Wsyf3}tX*;=O4 zSMTkh2i6=@Y~oC=jm(W68J`XZJKhyrgB${fZWYE*YPQ;}0F^*)oxhqsf|bjL<9 zXK9ye<8*%2PlvHsHM$`VI^WSqEr|%;KqMeMK$W#T(Heqrd9ny#6vOnXerY4>R)Aup z8BCAP+Ji@2lHHO{Uwy96Ts*zm#D4@4g=-1^3u%g#2Yd^D1O>rO;jsYwE|A$gmrbXr zS7T3gaqGqJ)NxO~anF|&&=UK^ht`5b_zboqD=&m1EuoosltG|oWX-Wf<;~mZwFK4W%K^Uzi7_O=D5GCOnL2G3mz(miU9=6n##*Gff8AEA{Np8lla!o7V?G$7F zB6$`)|60uEQxn#}37vV!8*T~Jv*E@-debdYeU8ja>5)#)DaA{d<1#Y6_OIU(RNp6u zp{d0?!-{Qe9fwcZZ8iadPD8q^R%6w~@>&q-W+^YM#PIjPzXlteQG)%lQGZs>U4pjn zG-tThB~CVTLzPoK4_Lbnr8uYx@AN{{aZp^qVW@m3y<>*n^JFV5&Tyc;1=>&`VE6rmOX}}>(~3GT(?wfcpd1LTGt$R4(0EyBMh&in{8Gi#4|*&!wmDv&>>e|`2eTJ8<5ceJVY}B?dhY4; z^pY;W?~qNWN>a$6MUD;^mKP6uiJ+yA=T?%vG3+KorOSk6Lygx+cQk%aS!*kqfGkWLAl6+Z%TKs&`sW zY&ze{j!M!;`jQ4*v3-vywM_=f@ybauRE9k$+M~p1${k#Jz?k?-<0B_<);$z{P+@0t z0~?c00Z}7ve*RaEvl+{aHA~L!5mAx*YyC$1N6~qDwnt0i8P z!q}YW0Ui;Uk7Q!D^;zN*YAc756{Wog$%*xCw`XPJ13tQKKcRF9g9Xtgkq?^9Nfp1C zH78i6Sl$FbJ-imOiTZu6+r|p8mP7r+m;K(3YyU8U$LQqgv&RY&W}Qx{qC{N`((bU2 zMvlhR${pVCET%lz2*>8-)|Ww*ILORZg)x7dlha3K&8c1v2H8lNfW>X>@%jvS}PD3A+1T0Qg>OKB2zuW^?rHW>J!`l)8Qvy=Aj09IPgFCXxG>r;8#lHXKHgob7dF*BCEI$SWLrS1nFMQSFAcZN zM_ZA_Hru;A7TC?}LZ7Vk=Q-1k=l@zg*1eNMh*Nx+=exE&N7kYe9ADfrN*$!J79xWw zsaiaUsmn4BwV*L9tMk`(FBq5;LK8&iNS<8P@RzAyyrKAS&0LFEa9Yt5uDSmArsLVH zDKF?YY;VNTi#RUDH+Hu!diidt=I#6MA-tSsvv;F^bZA9YntOWd|5X!by_ZQ7#?c}= zrL@EfV2)tC(S#_T6hhQb-kmm|{5nm8NC92x+}HTlc~I|fbO27uu3alXYw_p_A~bL8 zOv3SR(|^q3_tNx_%bD~BLT-UblRJbpST$ebD1fM-XJFhJ<(*0`(mEbOmdQ}VOYzP7f-;5ZAV47)64WM}k1^n6qXh-OBYnO-Xt_3PjMN?o6$NtbGv_=M@&Q>Mz_ zW<#0|v5#e)#_pcb7-OS)=dzBa);IL}P2Gw2XY3C;sB$-R@tC2QWFL^oEbud0uyh#J zf#+d)aSWi9)z&jHm!|RXL`x=|snSLx!Dm0d0==)ZJDDs!OoHl-g-ouuIS^CXR-6>Hao_aCEwLfT;0a9kd#2aDiN-&?&o3`C$=jsA}UB1KeXf@Ua$OpPnE9F{RnW*NKq(pxT zWkY7#nr7;tV`M-eQLGb7+BsMASazC7T6`({qLG)uW%&tJPdxQfIi@quY_SAG$|=@U?qyxa>-i`85%l27xxlC?3W%XVR(HgzqMH zjnK#l_CcGKB0d0`^5%_jUBAL#cZ6McE6h9%x2s5b8_@*Z#PrC8>4c3yPm88l#=Ns7?RCAs?K}uRyt*_4cT4=FQ9({rdu{$ zJ-Ot!y_C7f=i8y8gh4KgL-3_sSD8yYkZpv(GsyzjturpeZdJ?PTkxs4fET1!j#i4v zgAs};gSm#i7G;l>g2zktff|`)eIhw1=|m$WY9jugmNncYye-_oB;9(@LixRZD2^=1 z4Ih=xS|~7bCwf&dZ7i(dV=>1mfE+;Qe`KA4z@z$}He7zQ)y=Y4CS79?X_q_m6nI~M zJ>VvHBkMR@X=-?xEsVtb2m2gsoI85L_sEaTk18tJJ(PHE zI`;*WD|;I!c!Oa{x45Wu`SZ!N0!NmpNnVhB;-$ZrYui!a_hxcZYY{j{1Vc6unB$-N z;c{)~XtCNaQ{~H>(o*f>H%g&ri@u8~Wx8iJPGdPDdvjHJ&1PlW82hD-hq|nZedzSW zKd8hPfUdG416s;%yuq1#-!v~-pKxnFL6yJz%{yc#kDaChi(9Xc_;eeC3|b#x3DJ?! 
zWdRLhx`$lXWt$S7L{xjl3`)?Qn;4?NR^MNxUV26HAl)~*Itjc-jX0B@qco(3`gyS_@%yJ6EAhiQ2#jJERC zoMs*V`zF83ouUS(xvW#I(Xv?y{M}l!YGkgQ>2)s2IiiY_UWKbEjvmj)bgu^*F=d%kmrQ{`GE!#l)fs8Hcw0H0= zylgZ|V);0)Iot?zWymM`>2I&`}Zs$QgQKMR^{REyaX+Q3c zihmB#xV{SwMPk(z-zs|STz&bL9*gl|(ms-r7&U&3@4)?o- z;`fG?mi4<&0y_7VK~Y*9_LosKI>NzUt_q_UhP07pX=FpfOR$n9c~ErSfRv#AHGMi+ zdJ+cHe|GW|a5}(O;3(~tOn^dp%&~l-MI{rp+f07Wo7nLi!kk`&*Gy%WIwucbLJ1O-b_n>$epBOiRaY{~)8}QE3#8M)_+nb&CiE<{Kj8u}YgCVU92w5d1 zAf@|8kM2cW2;`;Hqt}prpVH*<(-dqMcye!4KkL!H@tKJh&Nz&E|8eqRM#}Li9&@q& z%;@1m#nneYw-eXRw<8y^6R({FWC(H?Rjt6i}V-2eHBxqVk z&7%i^W?eZXMRlt(So9aL*hSEf8|^8Cv^TNxX#x@Hi0i&Q#4%Hcg3mvwI|iR+luP5m z+&^J9912nQFR-P#Me+354&U&Kz$Pn&m~G%Mw+OMozGKDjy$@ClaC-RFMdbFR84HlN z5o8fuqzPCD5+wUXeB!p7$J$WPK?uNqkC3i-rmd`^R@=P3iu(jK=I);-?4j<6Yi}ta zk+jlSRRq4B)IR_~ktkU5M;Lpo7w_Gv8sTK!RLKl;AbkQQ^Wgb64`>?@*#V+Y8?FS_ zIl}myXa{6`mTZWj8#Q|DvHP(yD-84lqL!~1J1a&jab{O ztQfz82|gKY=oF(!ReJVI^pNv#L8^d>$wUL*%}}h+)ri2R4a!?e8R~0msInge;^!%O z4U3m18n0x(X;*BsEvLz*w07Zfd9E4M&1Tt$)Pe<`YCbeB9)2m;hD8ETr|jH?7FbJG z)2%CioBZWzwKTxpG1YH%FF{X2WF-#s`Pky} z=>MPKR_RV?3Sm+80k}_~0Y5&jYH>T@ZV;6_fECE*ipUE9rV%jt#AHV0? zq2L;#=VR^yEU?IN7Y+xc(lG}GfH|BDlrmHPD$C?ngWj*@nNcYinf{4J@ZnCP z!{1WiS(~1V%+U>nv_-DAGQ31k8zO*-lpth>9hifg=78gXh-A2lVgXXO(jVXTA6^HG z|KWIdnnP?ZUiP9hNZwT(o?d;t)JZ4(;?`#-7W`y*`nZ_=;(HNtF0ZlbXI{~GX1w(7#?xAD59pITz|T)+bg z16qR2z^IpS-|<_%(!IH|RxfZhaW%7APC4>S_M3?nP4Z0ri{Y)~%ex&JQNN3lQcDsk zhgx0+D_m;e3>bZa<|!&8Vn^xUX5y>C^hoT5cL>2ADuDJDGJW=K2+-35w;A7}WnaE! zmebEHZ#<{uMTh*XCp_xuCKtWm-Jd5o&Tvoe@rFFWlp(5b_uSW`Y8e|zxB2YXTW(=E z@U9q2Gue%zC%kDi_Sbj1Skpzo<+4eS2A|uibw)}{!vBh&zI?uRbLP`OYXu^xKm^x0 z$~#qN0iO&j4!q;{FBI=`mZ`qQi7MMwQt$-mFdVg+IE~bM&VLiby_Oq^(Rmj8nZ4t z&*H5&mlOI6a8~~EGSx4w(CzQ=+bU)GlF2Xt`T4@{4*yoJE&bX3{Ne;y`Xj%zVX7y8 zWTK+3F+C9^(csx@l*XxV`mS^HS`zeuf{MyVgiyNS65g9BiQNb z52oL}XB?A~_KVvhOLll#KwjLZQp!2Te;M}(KrZ-xo{UdPA<9JT_g4nj3_Iot=>6vksI;QeABkyh*1 zQc`5k_Lc98CD#63vmxp!_)+E=t%nL$5F#orjId# z7DD?s*#QUn8B^+Y4GA*2xA_ZsLdZtQm4{13xj?JY@)Cjxr`9w-R75vL?!6D7K>~CL zpR%s=rkI}WH|3S_#*L5gl^`N$RL%%wgF2LKXymEHvwgk@r;h92!8sVmXyF!Mh#n;L z^l%4Xa)ZmRQnzZW74oqJz0Z5+8pF&*r_-*2yWC-T)-OLeM@2r8tQ#WceDn~Bb7*L!HQ`4KlK8z3wKZ*G?OXsJqMVjdO)9!Yq>k-d&(O2VLC zfC9}fMQ1VHG(8@?Uui;K?hT@j4C65@x=A80L52)_MaV?n)HnvJ_*}y88D`K;YG)O# z^Su6afsJE;u8+*L&@`s0&?1KR>!qa`%cDU5BMUfvQg*p;(gC#g~VaTH$+K9mHfLHVhscSe3g(7{L=DJ@FyhyqI$HG{oDvKG+nt)MQfYa;VzJCo@V`DOe`e2D-R=dsrYI?`IgFsY{CgQ z8fI~t{VU+^Vt1;+c}a9>;d}ne1+r2FY*OW?(7PuHmEwlwSK7kl$OLH=OX4tU-V}EI z0Y{I2mfx8+Mw4mYzBHzUWFHc6kt;)mBl#nV!YQK)8p&Q+$jn3L_DqSb zjoi(#RBDBS8T+J?HloDkm{ccUhL2Ap!d{B@`*0(pkUTf)_VT~axYU9sWu$@M`}6E8 z^5xcv2rS3N$*HA2y0qI0MQ)Admbs_FDAc+W!8*!na6Z7VAQ4YPK9+<@CBBLz93Gu^ zgnAm+U$bHT@@cNQn8G`J>L1YYMqR(6JWwqyRK{9PPcq`IHG70e=HUzai=WECuf1R5 z2}s(FPrya1{oD*y#}H(ub*;gFq7+Rj(T$Uu{Dr;sMr3#A8OyEr-gu`*qdkqSrTS(6 z>ys%2{`X->EAZ})JFwy3q88ipK@P zv?O-?VYm}lH)j3=*43w@B!#h-g?7(e#s15wA?fzl6`WuhyAEPE!@Mq}D#1lMKH25e-HVj$kPSb?v;9kyYdLz53Y|BL04k>Wb zarXI+`sDw4bOu9@FW0ZRmbe3;DgTrQSL!y0(=`_s7L02B$~10XO!#3Ih~NTH*&59n zHsLlf6+}wg*mJE&8#WK|3H3>A!jtJJ+Y6_Orin@lA8#fNVnKerF1!wmkkpYwT$dT| z80oS-*js9?D+chgNdMG?oX1T5O?H7uDC>M50`ibQQ3r1M+P}bSt=w3DS2b}LFtbCw9H##B1?E(W!OfK*f$e*4uzf*@pcBIFSQSQ@xla~17dD4Gr0XU2eEav4j z8|T>M?%#5APuyggP^`LCZEn{UYuMl2Xc=)IYkLHi{(6_{{{86D`kCB2DkJfw* zYD|Pr>x}$>5lQm6A-^+*JO3rr!bz(=@&9q5aggm1Cf>=^>3+<+ zCiHHKAAAp=4>C`aR}qgj98EY6!LS8TaflEP>jt6`$b1$ED+%RrgJ)Uk$;%%|yORZ5 zt*j1FCSSbA5;xu=(x56~Ct@-K1*ARrt(_^!P1 zG-|6jKy*ut_&nin4RB#6;@TAy?{x`F#=GM!BIAvcsS?^!X$^|HQ}Q)zXYce zO~5MNxp`3o3sqOjk+-^ikW@n<^T8X3c`|WyOYv%T>w!|Y<{JE zw|gW*>Kqk1Cw>S%Jw#%EoDH#85Pt>3t+N 
zX-n3?O*ZpT>%e~x{#|mb-ZHB~*u8)gz=a9Je)IuNmG6IJ-Ty$lFS@~I;0*)z%hl7m ze952{tlkt(qxIJWU(nrfWz@D9tXU$c|Gh1EZ7p0tw)v7Lg5mx;jFcCb;I%t$Pb$I= z=)w=N=)iyLXP57^u$UB+lnLwvLV>@4p}?mAXb4UUkt`7%dZ63{QZn|M5;KQ9-9OHM zLZ|`CR|@T7Pm9S3owmL@tgL!lf|1)CC9%ZL+Tnj(v?f1newGx@#K#>dB#@>akL z6%OHIY;VM60~OVEth>py6CfR_$#|H2;GOw^Ct>t(IR^^;8&k7+FW z84Hx=c=Hb-<~ANE>nXQCI+M;W$aAQQtn=h@UL%VQq_mp!Bal2aS7`fhDcxVMy=HvF z7*J}Go)+}ESn$)L_0xfvkAeU7f!<*4uTsB0W1Tt#73^bc=c`TPIt?oijRkisTPjWvr?I+adb*>n*`?CWdoUyVD!L!tl-Uec zr1wnsRDWWwG}D94lZNEcOo$gx#awL(BSx77MTNcS52txX6OW5Km!}Ny8v}G=c9GDr zS(4xswCkj}5o?!9l1>kwdzd|Kh4(L218j52aP=8r@ytFZUy}#Mg8=p532t zw+w1WIQ*`5S!RAHU~dAjrm`5FHo)_=DpuMd35hmNv-`RF82bYqxm)*?YYSE*{JK@T zb6^7-Yn$C(A(owlIl>F&xo*GJn5&lkmQl zxz%f)lDs{B`*PN6Nto&a(&Eiyde+2q7Vzpx@Z;r($2c2@QpQl~yF;;eN!RG8l7Z_U zU|_acwm*BDgyHu{r=9T?TzD(SHt^9TcU3{9~Z zwa9LZt9Cg;HdQNYCZ>y4<(m!M`M+Z42uwUa!c5b!w}@DHZ8;mv@cHKju^1a&F> zP$4_T?jQ=IK&82H16ZCkKsF&-zy+Wj$*=?4FabUR191FQ+&i9H@~fCp(tPB$_U@pL z%$06(pJs8MP9>thSd^rPXK$S?L4rqtb&SF#`ed zjU*4pOzVzV59t$P+oaw3XN$NE+6Vk!$Aw7kt>kSbXPeNrpH=b1S~E}o_yeyH_8bp8 z66d*RX=P43CHh23QnLW_t~9cESktP~fr3eBo`zJ+hI8WB^!T5~y>NV~**(wwG#}T_ zA-xjVpJQ-AG_`a7#Yqn{XJI1bvWqyBFi7;26j`cT+~YKsGgsrcrayief2DQII#s3;FVWH?I{sT^@&|FkF`Y5NF*Iv$e3;z;Zp`$I zcN-Dr@zG83BE_BaY(M0lMo%;0BJ+ULx3UbD4C@(?5h9MKfaEPsqaPAdp9Eq|;SCAY z_Ju|b`#b+OX$kw^H!Qhvm8BMpu$Nl!#Nm&Y3qW7ve)-L5qIbG_F6z2h>#n5mR+PPK zWE>y6*y-;MMH#`<;(pot$&@`YuUJPt&ftc1a^hb*LSf7Yc~8CGAyPbiwc(=kVBp&D4zTvK_y~ z+yXXC=oxEh0>hL}h`;d3svku7q&JMWMqS#$ATwpf@!(+f96{~;?ob$+@y^PHRIE3} z|Jta9J~JYB8j@XIE4}*)WzW~l(!AHa8g2dc|BgS$Azd_DWV1!;6jYJ{2G>%jIUNty zMMdL=-%%Vw6(OiGiQk89uo;+Tm5~O~M8K%}kQOX6F-25*d|r}Tny^fCEp+m91(MJ3 zhMz!MZIHe6Ke;!uKv`G0mNT^@{Ctd<;0gUvjP=y})Cx?Vem+`4I@-aV$b+pQh%_xP zC{FrB_V?jJhr+QYndN}hz+A?M%?7F4@|3H?2|^AUok#zyot;8%IoettBnvzTKq$V* zAhY5Slj@nQwCuOk8XUUuWD@JvD2#<;=_px7IpHu+V8}>>bR_LRmw}rKO3n22Iwt7b zx0~_M==2%l+fog>I23C!RHkgUc=C8&K0X2a3!gylz~52^zRg1d(|5aGtY&4&56EF) zy-zKHlJg7`7Lv68$Z3LWYY%Ku{)sio-L&jkP}0>h^erM&Fo5ebZZANMz>d{ETWsPn z%FOZY3u1!LKdXi)7uw7R9&hn1MFi=)=5(Q!V?KDYf=|*2(I4r@((|%oQ)XM4>PX5~ z-s~}hF#OsX%4-ZevekhC1W>sRDN=%y~aPxIG8f34Ffmhf#((^+!9`(dH zOXVBk76|MMYf0=a7?TzJXB2zG&I7kK$()f?MAHsK!^g7t1cj>y1cWw(W5dFXL$-VXd3E;bA`dTz4-?UzlS#6eCW8#(i_kn&Ou@bhwo6Yps$f5J$E#N3GUHa5=rcmOrXvJ5O^U5N}{^t0~7 z!8=ltw1L}iMAJ)N-jPj?brOOM`?J@}RYjcNbpe1br)>S6MivwEwukH+sGI&=C3U zNWdgv_+Oh}L7{3%kC_c6q;N=H#n}5`D!Zz^V3e)%cE*O_n^#c?UyiyM#;CvQlAWqH)%9x_lSopmstb_^%A8{BZdU*OY6Io!;MGxVO47w4A7qI z<*Oxe&tvB@ZS^b>{p*=BXF|(|@`MwY$_5`>r{m?oEwM8FifQO42ssahq*Tp&MZLzNPDvS>Hj(atD(Ch)6-c7K3RKyOO9$HutoYrB z9LeZwN49`W-l%euXydz7=2D#{3^g2ilXn5~5B8`r9Y0w!JtEZL7U^{y{W6rA0mFZj zGX!n=Qc;gjMttKTNc7E2lQbqai}{ z|Lb*np*_#;s85idYN3^YDIL$2vLKZ9^CLCS;K0x!1&ml6t+@^fOEf~ANjlYxEht6A z-|#M$Y~tTvu_PWSeu$evm=*?an1{th)$S&}O;F@l8;Mk_Wy)(pr5m{5)IgUl<%svH z!YRafHL7{wLQXcvwdNO+nl0T}p;AkpndD3k+N7wNQvW`JOa^LB)?Ngl?iej$rAZO1c-gAyius57K%W#MuK zQ(btqAOg@D_I&MITv$a?d=ax!sLlT~KSyV$b)d8E{<0#I6E4<6 zgX(KN7RJTM3g`UCWn~S$p`LrlaqN=-?tsH+zoKv*4d#%JIaEbY^v`x-5`6PrGeV$k z&Er^Ed7w=K$!aE1V**8cQ+@;I%pJC#V~P7@Zhi|1)(&{tsm(7NLhX{5bm%et@87EW z@u6=~5~SmzT(wqn)YdgCEkM))TX(eEc@8-WbA*dpf}54bB`0NxS#>Az?PS$#`Gu`s ze%ZM6*#|z}Ut-F9(0FjK%0YsI^Z$_Ts(Tb^TNl0{dsp@$fa{?s`>9X*qb{EoF$*&^ z1qXpeHH~U%m0&T&j#mz&*Lb-N`^g>jlp7-K?6X)A!rATtc@3jH`~ZIS&2kBU0Z(g} z$A5oD)*5fI5GA_|pDy0l6{!r4Im`vQew}rBG{jC1_~|*E&bm3DFmt4+k?jOy;1(JQ zCB*xE-6N?*n$h&rs#@tT`s0+;0fv9KS4k0KE0y7Fft}VrCqZs2G??KIMM@h)-)CDb zrbT%SJQ~lk%DD{4rZO2`v{>#Mpkf;w*Ku9-pHIIJ=x&kltJX7QDk@^Tgu9`kp+A*I zTJSZFYV{QTay~(x{dFrV^Yq30#%yywno{4pI=oNb-qJ8S9tq|aYSdh z=NNv?f`qX`mWM?{2?9I`y5Rs}c$CgDDw#2#)4CWGGQJ?bCx(vJxceJwx89a2QpcgBO4}A1 
z-|~i|V}jy3ec6%uQ;&}f8U?JGWv=ztJMYwKrp#7?9@Cot?K-1ejMN>biOsLT*~Bp> zv6}eC2%78CI_whsu8%5yKFcvV9Hj|~b|U)w%bs#QN7_MxAPOFgNLjwiW&gU%qk@{} zq=WUlEH?VYMK78XSVC6W`y*Ngfb4svzmz+6kGr~Nqf`5GEpe=WPp9n4{(aI_*X1V| z>iU+ECGxjr8zLEatR7f*k{Wy`{waCDjA<}N zGUCc8Ped42FKZ@o1la0LvE)9hrJ@x(ip={nm<9kyL?U~zlezI#~v8oe9MVonVx|d!jAs^%8FUt?SNw6pd{GwXYna zn(GNaX;yu0N#$m3KWK7g2oD)fGy;RNn&l~HB?ADbT*rWQw{@#0<_;D~)zRu0e`wh% zwMI#dbW%nx*;nDDhB!dlHlYx`|34=KNsEpKN9l)mLXIzoG1}RwVxs=wr(0< zW_?J6by&#*2Pg0W!pb5+MJ*)OBLC>u z+2LZq_!k!#gUOHNwFF^Q3%ml3@z9ZI%FQN8K{1%3aY~{(va$)UGU#11mi*snTma<6 zN+)|AfXMO6F&S?kpp0shH?fMa%GaaqLq`*)#n})>uCz%F#RVwq!qxubhovOoL1)ZS zka$Vx(?-z;)KT960&|oTB{?M6ZLC!quonEKE5ldF*MgAP7agj5;kj zGhoj|7lqqbki%_lS~I$P1}=%b5%6e)X(QBR^B!!#EtFJKWR%sJOC$Mn(i?%;deyHh z3c{6T6Lh4VbO8U8-jgjlQ$rXrEv9stH{3QIQh4>R#1k94Ut4*EjaUoP(uUU99E9vS z2Wz6%y_HoEtCl|Lt*dr|sn=w7jf4HTods(${0AJtxyQV7BYeW46b4q~BNSJWtBDB= z-~pZFbDLSd;-sm0bf;Kc?h|C>0}$_P_eF5i&B-5G1xemq4@1jL9DBj&d2_3rq_GjC z#)yifccVq929|SxCE?e}{tP~r+R?agXUXZs?mTwdh&@d;55l8Mb6giH8J6++ zAIuh{-#LvpCOqG%YJcdB!kuyJdc$;2%4^x-?h8hUBwP+hQJ&*HJ>=tqo6P&$P^w)J zQ8!7!=l(ER^G?c*&_Ju7ZFRr|3l{LJX2g9&LtmY5@u{gfN?bEK7s82k^Om*U?LYW& zT`5@-cpEd@ywdR4qoeLMNugO&2}*P-5s`9VHT|;^*(0dqA7<|ppzh!0tiV3 zqBs8l!tuf=oq~y%5~rOfb*G)D*Ee{WIPf=c+?(l}v7743-<)oYPo&p8mOtZWkJnu+ z$@D$5V%2ZL#c4VdgVwBCg)vq{xMWG!lXskEOhNfT&Go7c!gX%~O;?Ey;X%rq0M5`? zQptAf#bNk0ET>(2Oy~ST)fG9OW-pgACd4#u-J>&9?BN`Mu_msJ4A#yLtG#!#hD~14 z%y>sIxY-pv$ab{J- zdyOFQmn$?S_28I7$f=Nr3s-+Hjhm2xU>r0TaNCDG>Q|Wedn!QY{Qo2CEu-2Bylr17 z6ez{rwLmGw-GaLocMDPo7Tl${Ln&6YXp!RX5`t@s7ERFN?(lN%z5jFWd+$>+vd11H zANJmB&$Z_I&6iq55s7u(IocpI@@LD6uj`o3pCLiOp`Pl9-{t}gMI)&!Z2CV82pKo? z8LQunD=$P&tuGWS5qchv*XWPmHz{PqC+Id@2kQJdHYX>#n6j zv*i0S*7u*YgTQeyLKR+4NEe1rZ5eNmkq3kDl33wpm)ez2&CCaqoKl&NV4aZ< z-XGYwRLeLFBoLIg6Z_fL8D$j}?O*b0bH2Twx07PI1hPFiv?+JheqY{l3z)G|6(Jze zx@T@_SfvmzJ}!C9G+$Ozmu~2%AI%h0zqBUq9j+kS4kis>f*#Kly3UT#muvg=0%RJm z&x`Fpn6*{cv8L`dxWxH2x;WjtuLF;+Hr$_x$p4jH<*WT}&2W;ab{Pb!AXp`=?Np}o zs^g$yCCkCb&YG+L z+EkY>T(8O=W;U>SOTjJnMvj?vWU8892nnF@5xNZFFnKLLF9^b2iMHEE3A8zyaq)h~ z!u{cd$jThrI6nl(8yXO8L-Uqm;&J?PLxiwb`wCKB>b~({;DS_L#_jlU!Bp>&yd_O0 z9)=;`%BozGg|hio-E#0F`s8jQVXin9<5L{YJGlyjmu3^7p9Z1t)pB^$ke{DF=&le( zJ}tN3051M#(7*J*BuDlyxLNT3+ea;i`pk>*m=a-WQ!yu~pcK^Y8k%T_O7R05mEj%P zkb~r~l&W;5%!X898yt<^6n|cFW%bjvUP0k6QRf($Y8hFSzA13nK`Q$QZeNrTR$cl? 
zI8_7qhYH8V7T8JG+{T{A0V=?7BE^F!`RLNN>3Wh^&7`6Eo z8{}iaZsMaC@2fM)r_ft#V5tAJpuWvG^#$?>Ru*oXQUqPJuK@CGwQwo-sx7oV0hdLU zH!U|n;fDSmnWkQmo)cfA4bIz#QHMwQnr~QtSfx5_rHs}B(>L}uzQtiGuomDiW2+J+ChG4Fpikl1+=@Oy`wF zD64@HPn8lE9SOo`Ia75vv2>2(zlr>KbCWU4dq@?URo+Wt@mj=j`P32OG)_o~h~@el z#p~6_U%gBT+@Yw*pP~D%M*Qm|DA*BJD?BLFcW>Q3vGZErn@ToRlWo}QIg9f+srBvn zKB+XnBNj5{ja}6^DbOhGbH0jK17krQ6Ds(aO#41w98o2v3<#s==oW7Bn{T;q#Il78 zUc~-VC-pssPl&P-afMR5;WLM`3Gz3gsX0?pxRO2C`eets*~#`3?JXg*KuQ zB343<iBe4EEYS7U{9DhbcmTT+lH@pK;4vIic4+} z%1rS}q=JHu&KhUYXTwO=CO)BQjJNMQHtNoI{^(zAL3Nub>KNF*`7^ZMjc?Y?vpzpi zxL>pAmYZx=aDC+BlC*h-^w+jD2NuL++pe7EA4HasvT;S7$X(^%xh!;4{&$nMmzE?n z+mMWe%sRr7Rk-k*s9mP8^ejJt$!Su>c1QX*sj7>oPi)sPpQZ!0ZLK%aw|M5{v@xxy z61cSAP>}-oAH|PDUu;m{16J{fgX~R`XrtEU<43Pd;*ug?)Ul)<2R?1So8IVfWAEk# z2s~F-9?Ipw!k-qBK< zW@b8althcU9Mmpha1K6s7q?A4CUFvfA>i?$U!*(*#n1i<)41;7UhgIkeXdu6E3mpG3wjt!CV; z7i9)+X9|wSVa}fk2?}hqU;0|P8sI%?5&_lI$5gPWHYs2&dRV6`2A%)8py02~V32vj z*0)HM?D}XrIsWH7sU~K)brVIyTlaTap8hJ#GBTJ>e%1Z4v$Tt<>;#Yx$`f{-wn%HM z=_+?M74B(t+6}_qpOf?VKGH;hKCU!(rHon}%Wb>;fD+Y1SNnjXKaVN%(C%ob7`aWL zfgMB*fhl%Cq_BP$_ivW&@8{>TY|iWKsHk)K8mXwoc>PM_c#|N)fV9d5Q1ns-v{I!| za^BPbP*g)Z|bM_cRJEkiXZvTdld{l(ptxNXBVy39U+HfI9^&kH}B@ZcOP z(Q`WsL)@396XhZ?I?axbsN~G#t3R2<(AWO3+B|$5H>{7KSJkg(SWn)52xzMa!x-;P zGQbtvF!=oLbsH-G{1*@efe?bU9N&Ig^WRv$3jO!N=w}GCMwUaO7rQpY4()vSP3V8$ zPzb}3e!xmtkc?5}JtF&RWP1C(LVfPjAvh63V-I&o z<~#g-AbBfQrF1_1m!&Pb>|FTkY8P|bX6cGZIrg&{uGf9ouf)QYlW#Z1p|7@nRLaY3 zvwcXGl9qT6mi&KFaUmN z^FI<;S-WZlXzl%32 z@3pfqJ1mZCJ?BXmxyvreSNh&k%Ds!6j(Z3Z&2%@v4`&CK;ms#+KGMV+pg88VF>3Df zX6xY{i>2O4D-Jf4zk+_3`5e8PU10~Xd?#czK$Q*qz|TdyAroBZy^|r#G)Y(6oZL%@BVfm=$F;v ze6xzn^22@C5SHspzrjB*o*t;K82$e5hdAzYn^-(Z09TfFhOZUQQ;o0dwA=CL5)z z*hN`-3M+;HQ&fP?V!wG2J!El^I~IQd_VLliiu#d9e=dV8Wa=kLWOTgHTkf|2`ezy? z6e_9R#7`_ZgXYkMZ2&~=9pQ$cs3lntQ(DC7gK3(fmG)`DW|;QOVK(PZxxY6XR;6Cc z548Bag7XN*@rF~jGu;?|yqQ*m(4eoMBTjNXtvi;RKxN-jN7_rY_!EMh3STh*6O#C(($jnt#x%DPehF`Kem>3bBK7 zvfdg{?kBPSOndmtv?ZwG-U6L?oV*}&BkulowVXmCsI!fOPt3$cJ+~r&_?V~e;~5e7 z?u$18pYw`P8msxIDeUZG0w!g$e6r9fjgD)OM-Rtx3utK0j{ds6!^XI2e_Ad`YSY$n zoVPAc_@B7Asri(@`NewH&jvg7Y-J-jv%0QiR>!wamk#Djr?Rar>(%x8CXwqH?dn=Q zM$~sSt_q{s%H8{Re>ya1f8EbxcHtc~9g(xV6yOE&ic0Y^C-5#phFIPUF!$I_<`uPu zJ~{VV@8qRim-z>qa~xGoxWLZnDUxRtYi&GT@7Xs z6hb=8;~y1$?fiN&V%MFT>J?R-3e>HCt_F8$S1`J+6FI@|3@}hC3`mDw=p|#Jl@Z%N zAmK`Wp*i7Rz`cKagCqcXkSJT;9RT;bgqXrbTG;9(@dq_>idF9_r5)pAc)ND~uSV4A z16}R@qSul7Y8gdD#ZR3)f|o`@1it4ci}s}aKXb+Z{VB-*N(~UC6|jcUdeG9{l@xV>jo9`Rb@$Q`Q=lj?9+Euk}|F`Uu0@iSYtn<5K#AH!Egw0VdQSf zkT_EBNY<`=o7a{Y$|g)q-6%0=$cfRfUy&AKY2jMYNMrx`N7O8h7451h ziUCkms*bRh4}bmXOlE|-=@x*5H&%9%AS0hymyKIkxt0ByH9+~xzsg2X2ju|`pJO#{QLVzHS$`aeAZ8)W4eQG zL$Pie(WcQB6#0zPR&aFxAGD6lQb8R+g<8S9H{H>BwJ6@dMB7u_26ymm&aebPpB=YC zh{+g7t%7{eo~*>ysq4az#Rm~1OEkw@Ni!hJBLBs3BrzOKaQ+pxCF6zUoG5fK1%D%Y z7_+J`!)_)R;QdN?*fJx_L}YduFQLL}1Cv~%1f}H@#I6ecu*(*i%^BY2xMNXP#j5Bc zK$?kC+sm;7#1Q#};TM z&P^I!l1aOR=Jps{vy1{tfkJVBMYK9zt_c85r-Y_2xAFd#AdKHX4btUp8ZYF=+ogELv4&mczVii2u^9-3B0t#`h2D^6BW*x!nF{r4 ztu7KhQE(6ln^Ll#a-z(kWhVFDWreMgdz(Yrzy_rx{%#pITr(XXqr(0#C_1HE`8V%J z_WuH=i+d``n$PwJF;724n)1q@nE%hEy+EJ*e-{3)QVH}mTo6aiGs*;xqcfV3uHO<@`8~iQ^5Y!X*ro0*S zRlC0mS8>Ht<=fSEbk81PHywa$VFOqrVf&;)06x`@F~+ah6eW;hpeQ0Bcw)(;dzShW zS<@bLr-glFIjE6<}_LEIA$BXI&3)P&W^LfFYga(D%VV}uKx7;*r3XNKOvAfLQEbJtbqL? 
zY%f)vBfp&pdcX5J5)ltTFTzBH+0U^or&6H~f6(pH@+I#0Rf;LU%Y6n5bvkJD(EBo9V+^W} zi=V7XWGpM6q}Ekki9ZUwKa$5jv>1B0IXc!my4<=5{8M0^8MNGHoGaovzlq|n+Xl_0 zxODq6i*oTj^YvPo&`?#F2kYgioO|QmSB+m>hq=P;o^7t4zygdRgEGi`6&NO+ku1A1 zw2Wkzp%@t*>rZ&$tkir)PTV`ro!9NK_6XSi(#3L;tZ80|=zYblZxR^bV$>t@@8Z~1 zX;`%Y|gTBLOac+nw?8E3+_ft%+w zsz+uGOux3tJi{ld;2WH-FFCv;aYcPdd%l1~Nt7MVd=Xdi*qIgPXP;B_CMiFw6gUXn`|pb6+CX=Dqg2g%qM=z5 zupQjwkElhqS^PD$|8v9s*Ed1@R}#4^C3pVaS~wCC;-){+Ftvt93@(;`p{S2LwM;0K#2sE$RM}|OZj2S7 zD_-RD0cdwKvw+vpeRv7_6D`_!9;HIJqMA|w<+N0hO ziTjN(4mS=xCW)hnqyU4nRS{|*_EY$ZwMl&1R3FY4q8|hUw!gzCaH2m!Lx=Ym3|s4m z!q3TXokceZcTktr(0A~-DTV0ytQuHSfvmr)G||TwsbEI3GCJ{vHObwMRa_*{(VX+P zN-LWR|LxkWLzAfCeV^}x;$xRvcxqU)CH{k`0DC4U1yF8! z(0yqSfe7gf2q9(o;Fu%~ym(EC?t-ecbMaBsJUVm_+~3Ve|u+*zC50adgV$_$Yof z02R|{Br&K;Pbv=mw%x%s?ZW7x;FoEKp69>$xx!Uaop-#Se&}F55F&KX(R(EXn^)~N zzk7k{|F}#-D=~7$glewi{6P!lfw4FVvoba(+HeBzaz~xR7v67rG=?=(H-SYfeZq59 zm*Va0?JF-0b}tR+^jI>j7ptS5AGGLd+#;hW^5bRSSdmunbwnmR_cXwySq=E$ zr8QbtR9K<+a=Y{)2&4-iI2QJj*EaiClV)yQH{;?o^BTx!^{B&Nt~~-Fd8d7A-uBbJ z24`V*s)}p4smwgU&|Bj6r&54at(@wcYF2db&u%9AM}Ir?nN-qsS~5z1sHxG|GJw3gACTehR zr`y)&nSu)nQ3iGQ-RI!LRrJ>M!6v|?+)4CoUnE1pqGk~pKa>O%4&lE50>C z54Vi4Ik1|YU1j&n3=8a3Jm{WN&9UeA6mSS7G1fl8sFcikx%dox{6+y%bhAR!m0=FJ z25k24&ADe;_rMfEUjks?cNJ6-sEMC&&J8Oa z6~+E}LnMVF!n_r%<6njzOV{*7k<{5WZ<2URya1>1*>X6mm4d^ zwrVTs5r@V;QV!VpVWdD8Px#2QKkTS_ElX9ONe9r!L13D*)!Bq(v$X4(i^|eFB0*PtVOdioW>=bC3@lV0l3F~U z+nus3t}nN*Rv3G{Lj3I+`#r-F+;>>Akud3Sd+n%tC9Nqe@(53v&~pHzo(UQW<7M_E zxIR~eT-d4#RxwaP1pbnPCfjr;3;w7M~3^j_5qtI0ilq$D~ZBhp@+wLNACRp6US`eFq1-8^K*K&3 zo7Sh6WXVd}{EvYav_utx5j*_b|g-<)=5 zPy_fnIotnYtHQvkfd#~$ds&@QGkQhpnB<)D)JZy;IqV`90=~AL$Yw*5@Nba%2@vJZ zA2txcCwiyJ^4ydw;Gg5_lEceU%TF1;qa=VKTc!%g};^V^y zIzM8LT)lW60Pl6NOg<-7s8l9`cQszlPwtW`9h&~xTpr!J({H~Z5_|iD zw?xnP114$N(Zgl@Jtph*o@JLitF0-fel8oFqfieUT+YVf7ZA zgeCqc`dsOQWf?CqhQ2j+c4JM*Q1pD4X2tZJM`mGkB>DA3yv*h3>!)NZb|p+|Q`af7osubB^W+oL4-1FCL>Y zXhuz;9@}H?BW)6RHCO)clbtz5&;1B9Yd5)YkP%hzxjiBqJ_G6is-Z=hqM~ABulpPe z8l-Wu3!n%~5`Ui;@b1QQSSbv6LF=5#6)L;okji@!3%bBFH}!loIMV$hv? zxWyk9KrV{wwDrc!kxsQ&Q?U3{jp(cd<6^QQs#0NEUh(JiG8Ndrw;E%9=FWGmTd=o9 zzGXIAXm0O%jX5PapWVRBkrSqP>)-k830ykQpgjQ#E?4|k3yhQTG-DqL5!u1IIWHYW z+PQ&=v{VxJGXku(-vxZFO#F7h;DhyQm360%p}v_4xr^2_jol%LQE7{>+G2b5i_oOV ze&!w`HlH?iAmveMKl50d z;I^6AnT{z;?!BCbBB?O6nFC`t*Uy*58dmU1@7pg4c^xxv>iWJOjF0`PociDO<-dwY zuk?>l_L_D6$OKwp<8<2Vm3Qh!C(*v6diVn*v80XC(eIvoWpCG{H*<_JkR-X0B}Ad3zy^zFZ5Qr~+6y7_U$FjU~32%hYh_ew;0bpVk1! 
zsu+<91f0a^5|Hvz@{xYVpz^Z26w)ctU(rq5vzaOwb}*zm-lM29%)W=-OxHf%eK+MX za7fI1d2c7^Hdhc7Iv}ZWj$)Ofmux(aUr20-RNSkv!DcaMi`n(+H}Z6Mk>D1!z-jlP zX#)>_wMm&G^H$A6lr0YyjQR;F)G3oprY({{6SH+r;?5@9sL_`hvOmVpRqsMRGX@=AjG}~3z+8@DA#+FOYM(q$cfn?^yx2VIlfSE1pg|b!MYJiqc3@MZia?Cvv zF)a~l^Rj{e$$5=K9tTM7-HQ?T0;L`eeOD}N1R2&iemCS)OtH2#zMQx=5}aIsX8Hq* z{(4PZlzwZ{^!PW5+1@}u;Ec3YUvDT14S)P>=2Uf;-F*cNn`qtJ4*iieZPQ)F?WI%} z0`_}HE4*0fTcWWg*;DorIMHz2cmQeU-}ep?%_4eN>!D^-soJca+-w5>RBg-OnDAF50%LF-|Tkz-!&nsOr1Q#ikRD9o?q;s{biCABiY_ z#*yiTgZ?&pI<_4`_m!)NMXwFtTR#WW%l>V4DpJdC248d~DKs=60rh^9jFick!35K> zgdPK*-v_6xLxQD;32x|2PIK}2Z({?5x*sv)cJgs*qN8rL9yKF^2Q!on0~sDb0e~&C zDbcE42{m*ATN??z&Y>hYizwCC>WKKmjIT<8Pw#nh&!h%(ut8_aZC_V;aV(;DA%N|B za1fxLfc87B2D;*h`>mTe1xUMGFlT+LJc>dY)+5aI5TOON?+dSz>%}ZrJw2R6k>3Ft2;JdyVnv6bparH8Jj(SmE zKLPKZ)jcN#ldop0#Y&h5%7_JAq@7(;)=UWCPu;9rG%Wg+by-DhKR}|Du(cf99gnzX z|IC~Wy5Iol*5d1<`~Tww2)HWY%$z&heFQWwIW)-Tu`X5ZZzUwL43*g7#?v=dqE89g zxgOT83N+-4BT@)qNu)W{DjY2Vh`;f+NFm8)HsE`W-P!{mhxk6?1@}x9>)~o747jS+ zqSP%Iy#9FBvcL0d>fGdaLhIv|9c_XP4&Xh2{OMCwt zxNb$PgFofkIbO^5;DMtXh|@waclL69;KSfrk2ss2;Sal5?&^*!TJYlY-STu`q6pKy zOOt!-Dik}A|N42ZoBQ&~yXa;V1hKcnO#5ZQb?}LK-3NCME%CHi*nJLo5Pje^^+-n_;07QSV%Wg{Qe z>RhCw6(j6P7s9(!HHqHa?oB4sq1qLf0)Q6$!j*b$AiulAXh%p&#nIb`tpi5`GNawp zcC52*)H8(`C6YFptPeVOns6yY=zSzcJlNUjOc)2_^S7=}O!s(C7z2ArsKm6;OVMOB zJAqyXE)lnr4waDdvfLEdUo-La+y489(&w|jR?E68?2bUGN9_fP4+7@411|rPKeVy^ zFA1b+zwr`+_FsA@RH-`B;Qy_5e)4fCppcrlN_9TpAIw$SGWoFh)6u(jc{X$|X{r}8%HB z#Wk}6u7m}+2Ct=2G?CNl8Ao8@258f=G$;-Xg7g4-USuSH`mM&{jiPu%N#$aEx*v9q z>3eU<2S*GkF+9qh?&tFlFvQTqQNJmKEJ+bB!PWgiQ3G+1O4?3!6{oJmkW-(1Rjey^iDF z%!Tk`F^((@yuSFT=MdPk)d>$BSw5m1B+RId{MmIEu%+4|m1;0)0v<;OB=1i9OvLaz z=Rgm-U`QZKME=_kl$>2=3^7i%Ti&EPSClVL6_wQ#4TY8_H!az~ zy{c>Oa;dn*`k@?4;h!4?s6CYs>+bE;5x#KO(AK^wg4EMrw-Z)nu&9_%$^F(b1U;vO zcUF&r&$R1l>yhLA3Lm?hGi7N5+yHhl_IRSVL))iG0OIaKAr!;c$O)VyEE&x5y(~4NAX@#)`e+h+M7^`vsf+GH35)VO)x=}P^h2P|)r z6yP#Rs8gQouUE?xJRXRfP?|tF(Os`e(#Soqo+L-18eih~k+MeJfz;GkQ1OiPK{2Za z^iB~XA1+ngq2;x*Tf6w2@gRS4ouIu!&q35uJ0h`uB}>)e-R$XuS;F zo&Ppqc{Lrml=wKs;pSwwTJ*rb2@&p81YPvppXiw(iHO%0I4_2tr1m<`pmg zG%Fo9P@UHAK-BIDqT;wmj{QNb)P-PAhbgJT*D?FU3A)hQ! z+^90P30E|w>#guN9A$aVGfJMERx&iC>={yNHmbf*HvQPi=#`#KsFQpu z(N903`b+Sw%QlZjN(gy0VY`X7p5ta}GH*yd1Et30&|48#|I+SY0KD0z^j_I|(HRh-)ZdsYEfm?YE0=q@ zOQc78(zY?X<-HY#!h{3-A-n$QAV>AjQIGd%nJ9}1#>fBwk63Cq(w%^4qYKtBuevD; zbyG>MM*_SN%Y=e<`VTZ_D!0H(E>snm$1eGF{D92J7iQqrqO)Z%m&~%yw@`x8nm}80YsMl z{rc(qLL)1WuR4?o_-QevIeEYlI*qMWsA{o|nb}yU&G;4(*+;%gcN!?dpISV7#fW{8 zQ!$V?T)gNi2}8!osMOjUffAoC+hM2(v?yP*pZ`6jC+#l~ZYtks_&S#mlw3V*ryaw7 zuS&UGr~yJRz_~^5Hr5WZbAM*0TuPa497JnOdN*A9ZAji)6y@+QnM{F}F+O%cBZJd!ncOANQanRwHmSjP-wtT%C9$^FiO2ejau2i(CZcqFGQ zU8UDQO5=|mpB(GPBWK#}kDl*d1Fj}2rc2eb4T@I_!3SMQ&|=%8BBcwG{qJ69ajkvG z1kXQOqdB9u`dVON>>ZyWyV~`*>D3bE9+ROc@31_nundCFstBK1f^%Ln1O8pR;~)(i zY}FzMCu?xvZ1{_!(l>ADUo_#5Y1x)g>{sAHM~P2kIvG^7jy@~%tevO z2Qa{S*V8#CUbJsdIfHTnm6sABhFg+IGtZagOJ%Zp*#H|Ws*4U@cqH2knZ%0c`l9oK zc+teSB&*p)ZtzP(&7k{~I4y1WwMhUUEY6zi$-~?sYteZs3>e4W62JXdP+i1rZx7CW zCjw=m@uh3$m|>jk+9=GNwY)2~bNek9gT9Ay#$@Wf@b17Qr*Dc0go`e)=F8!==DWMJ zRo?c?R3@NODxOA=+qv!E%~{6!?xEI%adWH`sTo1GORf5BMt61h zWoK(edf-90#0s8Db*=p9+L-#@ZD;^2!Ggi?s9_3FH*(!Eq`+&;vO0z~m`I^c zO$%QOVt)3y%ZHz#pJcJm*Wd{A1f5@W5oD>Fa6N7-!2cBC#3o5pzXn_w+ODtr{9Zg> zu#=A(BvFEhyT3aYzN@-`eZAPls}@+iML$F2ASA?K8x5fZQ;`Xpin0I~Ink-evyWQ! 
zJwU&O@KY>k@{`Dkho}H_dBOkA44iVY;d|O>a%I1rzD`R*1+U&?$^?m(9u>X_-M|MgnW2X%-i*28wB&oE;MRa)z$d<)((8l$|4&_!mic%4$keSn?WK>xYX6MPP9vtUGwwt zZtY=c>PD1rx#csTXr7Vv=&+DQZ{OJ;!lc1^wrZLl(St_=|GQ(>=D9qEbRtSd*FGOA zXU5c8Y)kxRDRtl%{b5)2oB7HC6DaFWp>}2xcU;1f4t)%RQ>>Jw2u@<1OH@rK@AI8K zq>)_MO96)sy+v6743cq-SyUQYuj%q1=I{!{1vlzW@*SDNRwX$gpSbA{Wyjf}gebAJ zOO;osC-vQpDlKc`Ym`;8QYbTIhN-@|@Y$Y%bPEi!ouCsJT9M*+t)79NH{`lpc? zmxTzrop-$f9%y|?)xBba96xzCzAV2svlp2?^JB$oe{rY=OFrqJ~^L%(Kivp16lO9qo9z*^1CZ z)u3Wr5#5EI@K--Qa#^e}3kbNBgD)ef(5&Sdq(5pUS0GIbfI|w0xp&^;$E}4l4$CA^ zZu?0Zmbv0Djb{s&s^n(@{iG>U_Z_;94b4l2=TWibFg#YTGPzWV~UR~92n7ya8= z%eL-6CS4V|XTD)_B&c%D6-rHqM$kwDoAK(w0!BJOwz?q-)MoW_+*6tMpL`qg&|zz2 zTYs9{j~99QNG`Gx?rdXe{C039Q;CF;v{WQ7&k0(?knws5@+xt&+|bTUXFVf?+1-vh zWk?=1Vi)zEjKPqe?2-ZM>Fzh2s) z=D=ejf7$0${na%ns=AA`^I)_b3|9}4SZx|E=4Balva01iAw&IyDggvdbGZa$X}CY# z7&|BAJKn%skFH{7JpZB3jO%_~|4O*&-e~-!_di!H)U|Ay`a} zJY(6@SGi16n^p?lOMV+ohpmU47`Hf6$8pR9oT<(gJ@)?q(haKQ&OZ&RThTmw$7ox_ ziAhhn?;HZ?WS_sbwWY)h9n}XzH3%in?Wx~?Sl~Ywq}JNR9RsjKe>3MBWd~`X3xBum z$&*;AOV1^>Oi;}uPAv~R+`Os#eg1rV=u#Twll z^%UnSBd+5&@SgJn7WF)m0fmIb5g;I5o}2fsHDDVpbqGr7(n+Hsaj)6=xRQGH{pxtN zW5~JZ(Z*x%CH3=+*UX~DSruCZ=hNiRy0eE+^r0f$m2AMnpe4&tv*;ngs*#|gteCq+ zRkXE7!thW}+G+wWJBcI}uQT9icp*Pg#``ZvQxn__UA|J!3C5X$l^1wbIQ4LKKsu2v zn3Q4e%MVTZRbrnQoqyDSh#DY1`J?VxsVYwp|H#Q+Mq#w)q&{ys5Ps&&HRoh z$5$&0rYXY`<9G&-VK-dO6zW#)H3Y0@uKo0leUYF5@I}s5&wfl>=9uLO>VC1Tn6$pB!7f)ZIYf~LMoCjOiDZ~vTxFeb(%1hv~?7bK?IVlc^+Dnl!2FR0hx#MZ3y z=xk|H`JJTp1M_bfM0+aspr%!*sbv8i1|RzCoVv3tMO^na^RLv0y5BH0W=PBsEn)!V z1-v}`%c(AQ@n}#zL@+XWrOSl{J};7Sj*Zh8htysPEb|5No?em(yU!4@aICqi=J?NN zoh|e{^9S}EttmjJZkAc}45~iW(>JT5D}DlbO}2gk4ATSH&q}#}#?djY5RUb~gJ>*+ zhlh5p#K8Orw%q4{wYT*}{$)PQHi9+Og#`I=*$E@JSGyL5BN*7;q3G}DWrVHKX@9uX zQE63plbH1S|DX!_!a^TJVJ@7xmCN9D9aM1hO{vR1(TTXeTZDP2WzE{oofVN+J~*HsyG%u799twB<>{6IXK^_=d9zF^j4}Y^MeY>k|KGAhRJ+5js)x zmq|VV;Lwa^%y|<=)_VNH16%~-=93Hd2wQsPX4D$_J6qmrh9#N zb4gu#p1dxN1Fu@||AF9{>+3{>wgT#s1|u+(+jR9FPq=Vdhxl7Xg@0eO%J6yvI8yOX zsrPntSh%ojao;KINkR8^MB=l^4P@%yv)HO!Gxt)yigVOQlK%Xo8B^)?49Xt1BjSqpp_1k^?fiV{Lzt=!?9CJMocY@ZU0ZW;Fx z$_md+zz-<~AU)q}CQ)?@Mk(?Qf0R^_WR;#P8sLpKs)jc1ODQ16LMwEgK8cCR&LK@f zB55ETN1XB#>vmz3+wZ(nNT%WzK_;H|GSV-KQh6w78hKe7*UJXT)emOexRSb0$Yvw= zh#64T1b#kB5B>w5-(E%@e*NC6Jlv|>6wG-$}CJe=H%37xl(7HQ?kDlfX@0 zcXnf{4Kk?|@$Zb+10zRB|79FX`&LyUiUiN(?eRrnuTqe+7~lx+{nvlb%jXU zaAc$8IB!)3T4Nd12e z_~b2wDc{9Sh-n~~`@K;Un@iAadN~v>HHA=mhtA|HqW#5{tHWdzssMbmR|fZaGPuA(SvISRA(GiT>3BF?w6| zp!Sn4Tl)bA;RE!iiJ@6tRyar(W;*YpwO!A_)u{%=E4&6;NjB{LL7vzV=U91$Qy2Hhse@2b}^pwl_9JpX07<8MOYt?Y|FqBC?k@_;4FApKay)&+AltoBINN)oSDq;6!`o9JT zE*b!~dD9X*3f}eXmw2&VE~2}^_^nb6r|2`t+kfWnQ@^=S^|Q#Dr$S`lr(xG|5++lsz3oS`Vr_NPhALKDk!115>GQ5(HM$mFn8a)C1@fiCcnBN49(p=h@)t zY?DcK`4jZj6RSQ^iMg;_sSMegZiJ&pZrIBE@rqv1jF|NCcu(ia&Yud8^3YGQ(P>R+ z@axT*yLXPq<&U}*OtvBCD4cj~z5NuAvR+mQ*Wt#|E^SPo!f_f;jcLG0$xOuw^VvM8 z1|rw9>h2}qntLO`)BJR2Hd6(LH_AiAVN`X@Pg#uMP6LI%`x{zgAIt7c9HE&)7k^T< zY3*{s4xPNe0`FHgnHP}``ybO69WE=AO5<&411~>4UKdhYEv3yXUTfCt_27&pTRyC2 zz_!9JbTgGtbm!v%PxG<3>prYz!JFkvOIzvYHVI^WKCY^cO+H4rmtkA1L0`l-IQyBj z4p;bS(?dn01An;)e7s2aoMKKGGm`$wYYP&i&iQ(Fn5s%a{LrN}Ahm(MMt`WcZq<-yjcY&|bH-`$A8q$>G1J&Doza3#PZy;2 zcXzOd<9yL5S#xiOsqT;eG?Jfz6t_Iws#gCGS#KTGX1GQVCP0zmR@|*v@!(dp#R{~z zw79!NkmBxEw532>Ah>G_1oz_F0Kp|laM;}W?cKXO`)}sU%$J!r^Sno%bNFz&;2-wE znOlzvNl7YE(jx8n^KTxn^md*GFb%y;{&$)C?=UI0`*^zX8*$uz0{!)%7hh4``bx9y z!c`!^VbhD5l(L%98NdPb>#6aY?Tv^L9k6jDj)}%V|Nd+eZIXzSNF+2!uG@n*x@ZZ_ zh{Y6ekRm1(Ua76~#IgtYoAQH5AT(B0Q%0c>xK&oE?Fn45dUnm?oI-xR*x)WRg)fKl zG^5!szDx%bsjxFI2NjCa*%TQ)vC5+NphS@jfOuG7&qu*Ue#VOKuq-(n%QX9{&@ia= zy=(B70gk|QBw0WS!_f;8qK~VXI3GK$gr_0+l;2sm^0QTJQ$9%N&c}^{%u>GXj-4;@ 
zl&65BD$&$Gy!Ckr;0#^;5CC}JhF-kyx_@yW*c(MG?KrO;FunljkuycgmxDf!5)#ek z1Hu<${A~ya#XwA`ltA;GtQ{;jW5_2A^?DYGX9**q$Ow!UB{!C6pN9Ah>qpW>NYhsg zE2X#7GCG7rSp@kJWq|;x;_Nj*`=0Xa?=|pTFDrVJg6A#DNDIE<5|%UeOuYB|nNH?% zAM^MCL4x-4&{P4ma%c6aRK(qQ{T5TZw}QS?bk(i2>5(AF*6CnLaQ+kbe#+`~9Jn4I z&=C-sV*j|ndP*mjSYumQNJlQv=Cf)Uh@r0UIcZ%YKA-fXJcrLybQlXI1f~ZOWh6gY`d8;uTEw`uCd7!o%(km=dh+4?9+Em zt??=z&#V4B72cw+Q{o3n_&cFMP;93JeC*lqYV~lVmN7>s+-yI+X_E+x)&*L82$MI$SidQPm^M}qIcGc=&G3mf5# z)hRyCjYaqi7NS=G<@Bp2u#Ds?$PA3dfUQ4nI!re{U9TjGpCyp zhJgt}33ffBI&hq&$+`V2+HnD7Mr^ZoR#AvC%tGdA{Uk&pX@!Nf7n9b*j#l`iT!(Og zUdP$6?B?9aM7+vLp2C_#y=wbH&I zCw$m!GN@E2r@Xk6RW7&Id+Q!APChVm`Q3hI^DD)AA_aX2|8d~PZ@-<=^6AU8x7jYM zmaXJ>{ZV<&bllZV$_@7OGGk$*V_OuPF=Gy~Q9?I^;Lv=yt+l!X%YQ+!-hA11&4+sFz}xVww* zgi0)2J%X*U3CTcPIP}TBN@VytEIbAF8_6r=o>ArJ8@o3mFM5roE`J!eYXknH+o9iK zKFCoRagamfaYq2Ru4UwPt&gTD9<1XC^tb6OfG z;Z@9s;D0FnudZ=T=5=Ld$eA9!!QUGjT)o&nJov51~GpbxY7x47Wn04cFf(O~HR@(E7X#ly3EjQglr zNm%g#f#Rj{r|1AID7qi;D%7Ev_T9=QbI0}IBgCc>)OJgIwLv`&HB$pKg3u{W`vh7-`( zQuKIv7C;RC?(D9(3=yb=04^=A4$}+T$IDXS7c8W}$tAx2s4jWm^NlM-3}>Q$0gOJI&l!D0&(34?H0txW`=oL!MK%?7g_3lF2w0nu@$`$8 zZ85I<(@KljjD&e_BL?mLdA5zneQceX`z{YM0#BP|5}w*u>!mrNal0Iw>sReNS4z`U zDmQeNPVj}O&_wL@o6)wWH_UmatyxOlYb@BVb1af&95gY93$(fQ9*HrCx_A_=!#*zI z)l4ZG9HfM2{NstDGNR&trxtgvr{H9*NY#Q6eBJk%TcwYyKrg1Ty_!Ccr}Gh26IrA| zPjdx6wQF!$;+orgEU;Vr?n7N{)_EVu)#$V+DB*id()_p?cKzKw+U_rk>@qYJ87JiZ z{z^s&4Lp?7i^YYd(Ps%mB_G&2EE?bV-dhA0UB@%@5BSr6_v2hoh~QzpPIt;zht_TV1h(uWI*sL~aWeZB)YSt*tm0ZzF9HCp3YKj$Mfka@FYZCZqA?8bAx%rdCJq=eU~|(|WdreiQVgQ2{;pD2_qYPb=~J~2koJK^4%*bE#o!YK%koaF z?_z@@@#*Cgar4T-^bA@#^!U^uRB29AY!=4#(;F)*m1AxF4j0H!5iDh5J3HJchJ= zseNHwXYBLOA43VW?;5~fFIL;+J}#vAY*6i|zsYj({Ku$2gzVSrBqRgjaZfYcA(Dv0<8_DF zH6tnP!q+}AWdS=%`Z{hksn&>2mTbW+_A+9^PXzHFDdK|XOoD$dxYbv|B1)1I4C`%z zD`G#{20i6m%O_vjE^A(@{cpL%U_rQM`?4KcgJ7|ZzuYu4?sGq&71RXfv{{M z{!NU!d}og0b(%37(*6I!k?ULG8kEVaA8RwJ6f;lQiI(uhLs!`GeY@wz!2#US@h4Gl z+m#X9aqAJZWzSVqxt?9TF~d>*dq*~ifwF=T-V#N$5mTcp=dT!}h_p`ydKR(#dl`(j zn(}7Y`!gFl*+0~l0P(AUghqEAwY#V%|PO8~5xj;;w(_8ZzyhMaqQqU3;8&PLz4jU6~v zhFHTXC$BDz=7fuwu&JUoo^6yllI~Hw%+rKuCi___V%m)arKNX;{S$l#987#eS3Wrg z2I`IhNeslj6A$|_hy5;rp&*;kUt=tuc&tiG7KB?EXCpdIPVIQl>m^ygdbimWa4x;f z1rmkthOg$>`n+L)&vZ~Lw6aGWFVM@)MUkK8gZF9dH2L7Ow$KuW9=>JyZx-30)=|>a zZlcgZMU`y_n)5g7PS{M-Hq**#Tgb&Xt@}{gsdR5`5Hum%wB2mLU^kQwQ1UQd=6w1? 
z_DX~a;KwQ=Awn4H%-Ghr>3_6HblQBVfAOKtBz3(d6n@bYJe*nVq=}TI5rn^mV;$A7 zh_#VUL*+ni$glqI2X6TmCwH~;)(H1AD&AclHHZfyb6ZP{1!aY$nr9qSod`>%q`9h9 z@^BB=pw!EPX2=*uTNZ$Us|iSULEsM2ieGuggYwYmudggG)Ip0VGY3BV84kK)*A~_e zeM;KNvfqv92vZJ@T>TmHq@iExXbnwfV5si6G!s^%>g}S_j|_5t0)D{%@c-hdaxiJp zsLzhW5y@BdG5?B^e`R0PMOQ8P_ZxJ9Cw6NGb!yey*(|5vi|*Ht=1uVvWn1mN`Uf34 z_wMnY`?K)wdL5Ji`pq*NBCxfFW%2f$0D;mE_(QyD zmclQF^fy>KNx z1d)t)F`_9peE=9vEq80z4Ch_-y|4fggVAWez6u$P^z5$RJ(%MBwEw2-UTX7o924PF zn)(>^xO&$bF2h6u4!)5}a{rg_lq%rIpDnK|Ek*^!IBrbCL}dno??c>cuN6ZC=~J<8 zloC3GEwaVD)!7SAQz>#m60an-e^3fq4LS+Q!3;wV=r$=NooM2{4>Ru1%kXWW%O@Q) zC1!21$6cFEeme!eT^~WRYn#O_h`QfA#cDJps;2vDtdJ7Y3do|xC>Y7IQDTe}y7XFfI@N%MCRF)tv z7u`8t_kyF0oa55eOWDMKY7r3~|H%EqyiSGIV@YYMbl@yjku^$Br3kLt@rtIdg$Ovb z!Ue-%Wn7%^GiTE+L`-4@a&F0A_G=Q%G1bQ6_HnufOK`h%-N3Im_f*ElW6E5rQ4|vT z)uK?_g_*RGuye+K7wG=nZ?m)36t0)dA~qkb;%R4 zr2D^MsT<3Ao~%8gh+K5d-+U!_TM6-qq69^5t7&K+ z-R|txZI2AP91j-fP2y@7t6E<-kYLq4LvlwUWmI!J96Zegf3vLl%ZhD9@>>Uwb&JFn z?{O@SmHMKY?hn{nNFM+SnVXQB{Fo>BO`C3WIYvHC2X8qS;+>-QZs*HxI&}p<)N2dW z1@||((fZ-QDtzN6`XB0sS@hxx$XWFf?o8>mDkoT!lspPN5L1u1yZ*WTI`ZiIO3M?7 z=`9~Ijccnjp4isu#iK#(d^3yX!T{+&AN&0MA?``eZ<->q1HP%J^&3njQ9Yr~0+N8gtZ@)z9L(zMjUgQFpziW0(@z{pH90zyD&A z|3ew~{P_X=X2UY!`mU4dn|Uhm%+(>B zEJ#8L{4lLBx|T2i5RGhYTV+hDunK@c1kOk2w+HLYq>V%=Dck^yI{C~oK&Wgn!A3kb zIvG~kq6wg<4T}E~TM@GhZSSof4}eLI$_>BcnE^VQ^{URiO=!3CYsLb`QpOBMZl+{0 z)93G1b~*Ls9cK2lkGKGTg*E3%gvYE185z)0H89U8HYiVTcL#YalR;F%&GvwdQrY5C z4SdG-U|@`#J>YxoGIpvqUd=E16e?AmAPu?ugeCG!U7@|_3z{Gr-V0tD8n+G;Zk&W? zbxRp?WN0;L+Ith%qxk&uD*%qLql@B@GY6Wm1p2TE96N}$>RGF8SYwa&S=mgtNDVnL z!O#F4ApU@ljR_c;%Ywiib7OtjmMH^kQxzw`l2$s5ljMHJgPw50mMUo)QZE848~ux) zTF5QeI!aw;X3%&5Gl%hXwg$B)jKhuLW-PyKtiZ^IbBh&R>w0UMyyU^W2v=w|Qx0_M z&W~uu5vlVz;u5DNX!04ISven9CZl;3(X1V|(of(_V1OQJ_a1T}_Y%D+TLarC%=%!lnZEpXa=z7cwl0|u8RGXGK=y5pHAZm(PUrYX#$Ezc}pF1t1H zad!=x)oNimhrL|;>aqm?U;s1CooSBzb*cYXnjTrxg8`Fz;89yzmQ3X*iKb(xzcrDH z5E@aA^N2AT&^uN8HI>_@tl`E|)v{896S*MAw|Y1dALmi7t$y}FtAdIH@Xka3<6=!B zt7Ri7A;W%YcSpX2+9)WXl__hy?J7uCPWw2trN(+DSY(FM#Nr2NOO zH4v^)6ob%p;2?!h{X!CX%^kMwqbUVl)f2)}KWUZn^mu1s;#+Jdji4G9Pop=Z%bj#L z;VeZr2Aw0rm(%|q7^J8b)&lvJHGY)dR4EK@FY{4A`V3sA=)DOnhZ9f+0Gj4<8a{T- zy!CsVhllT1SyMl^P1N`N8-k7;oBK+73*!*yQ$+~Kl=ZfoU^_ToX@_M!qvf%28CLsm zqHlq7^K*x;t-6|Q`E0Oefq=-v*du`n_)4+vSA}}609z7xcht{nS6ASZ5$`i?x=gy$ zwUEZu$9|iIBU#j2Jt9z}b8_<5nmDgLUQfdaiUc>sjhMF~ai!P0H!r6k%GAcisgHSF zj8WcuOCl~*XCH%>zJo80Cz;oN_T$_yrEz9;Z(ijJo&P57;wXCG(a!+mKy;6g2ik7}vlo#7SLTNT3Fvl6;sV!6RLzk*OPFHDYSxc$F%&%Uxh zmj02Q#j>DWPcz?pM{0q)u0z{jM1f1KG86;lplFWe9i}t^Ct+{iwx_bFKzqW_H=CAG z+K{1qHHt99Ts|i0mff4YxYxRp5e{xW2_;)+qRSi|gR_{f8pPKR64DUK=5QdSbNBQ| z6$uN;eSeH}zgjF;gFEywZ*pTLq#zam7TMS2i(00hpH1FFB18w4Hk0E^`WvjY`|{Y9 zPbC#p`im{G`nV@#=<^=z?!6d+$})E_J$m)t5tWke8JNeO4;N3rzw-8kjbxz`qisg zZQU`51->A zbp2b$SusmyC?`Ex{B)-uPT1RJi0)s}c2TW4$mQkgQsa*~JPS(c`0PrBx;!;cUNfc9 zY4(*rf#FBor_U`7|11^eIENNYKDBS|3^LPOci-Ap$AiXl!mq*%Qg~~BUhWKUd?2`} zv6T>cbxB%Vr=4WjN!nqX(z)>KDi%@edyw6Ey-L|}zJOoA^n-KTbXP9>vO&4}xb{KI zH)i!plC^9ihd7?E@#4_&)sqTl9g%dygGS&$UQw5%@i!IUR{yVqpIeE3{vav+uhjBF z5WnLZQ6X@>r?y$8wVo)_j*Q=&zwsX4lB(8mwOCm5rKi_Q@(cE%!5!Hvid4VK6#K6O z?7yOC2j&{x-6yBO&0}-Qfj@;Z5BZ+sv7dUQ`p>v861!I~uEg&D(3I?aOC*sqig~l` z#rcjHfHnD=Kt2XI4SZ|ri`9b;7w5%&cQ7px?ii|EY{V1@ym4X$Py#~)m0Aga1~{Q! z`{b%vUg~mQN5iVlz@VXLa(}S^6fJmmD*5sa+8oyFzfvavf!GYU>3cExaRPv=AyoFKvIbibgCxx`1jGc$0KDHK zabBze)HqgKmFZ+d)bu!uIZsJ;_ZYy7p{g6#7ub22!OcKmzDlJ$8ak4BT8!A4z}XDI zPh1&V450f|KQr>_F8h;9