From 6381d4e110bd0ec02843a60bbeb8b6fc37a9ace9 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 21 Aug 2023 23:07:43 +0300 Subject: [PATCH] gguf : new file format with flexible meta data (beta) (#2398) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * gguf : first API pass * gguf : read header + meta data * gguf : read tensor info * gguf : initial model loading - not tested * gguf : add gguf_get_tensor_name() * gguf : do not support passing existing ggml_context to gguf_init * gguf : simplify gguf_get_val * gguf : gguf.c is now part of ggml.c * gguf : read / write sample models * gguf : add comments * refactor : reduce code duplication and better API (#2415) * gguf : expose the gguf_type enum through the API for now * gguf : add array support * gguf.py : some code style changes * convert.py : start a new simplified implementation by removing old stuff * convert.py : remove GGML vocab + other obsolete stuff * GGUF : write tensor (#2426) * WIP: Write tensor * GGUF : Support writing tensors in Python * refactor : rm unused import and upd todos * fix : fix errors upd writing example * rm example.gguf * gitignore *.gguf * undo formatting * gguf : add gguf_find_key (#2438) * gguf.cpp : find key example * ggml.h : add gguf_find_key * ggml.c : add gguf_find_key * gguf : fix writing tensors * gguf : do not hardcode tensor names to read * gguf : write sample tensors to read * gguf : add tokenization constants * quick and dirty conversion example * gguf : fix writing gguf arrays * gguf : write tensors one by one and code reuse * gguf : fix writing gguf arrays * gguf : write tensors one by one * gguf : write tensors one by one * gguf : write tokenizer data * gguf : upd gguf conversion script * Update convert-llama-h5-to-gguf.py * gguf : handle already encoded string * ggml.h : get array str and f32 * ggml.c : get arr str and f32 * gguf.py : support any type * Update convert-llama-h5-to-gguf.py * gguf : fix set is not subscriptable * gguf : update convert-llama-h5-to-gguf.py * constants.py : add layer norm eps * gguf.py : add layer norm eps and merges * ggml.h : increase GGML_MAX_NAME to 64 * ggml.c : add gguf_get_arr_n * Update convert-llama-h5-to-gguf.py * add gptneox gguf example * Makefile : add gptneox gguf example * Update convert-llama-h5-to-gguf.py * add gptneox gguf example * Update convert-llama-h5-to-gguf.py * Update convert-gptneox-h5-to-gguf.py * Update convert-gptneox-h5-to-gguf.py * Update convert-llama-h5-to-gguf.py * gguf : support custom alignment value * gguf : fix typo in function call * gguf : mmap tensor data example * fix : update convert-llama-h5-to-gguf.py * Update convert-llama-h5-to-gguf.py * convert-gptneox-h5-to-gguf.py : Special tokens * gptneox-main.cpp : special tokens * Update gptneox-main.cpp * constants.py : special tokens * gguf.py : accumulate kv and tensor info data + special tokens * convert-gptneox-h5-to-gguf.py : accumulate kv and ti + special tokens * gguf : gguf counterpart of llama-util.h * gguf-util.h : update note * convert-llama-h5-to-gguf.py : accumulate kv / ti + special tokens * convert-llama-h5-to-gguf.py : special tokens * Delete gptneox-common.cpp * Delete gptneox-common.h * convert-gptneox-h5-to-gguf.py : gpt2bpe tokenizer * gptneox-main.cpp : gpt2 bpe tokenizer * gpt2 bpe tokenizer (handles merges and unicode) * Makefile : remove gptneox-common * gguf.py : bytesarray for gpt2bpe tokenizer * cmpnct_gpt2bpe.hpp : comments * gguf.py : use custom alignment if present * gguf : minor stuff * Update 
gptneox-main.cpp * map tensor names * convert-gptneox-h5-to-gguf.py : map tensor names * convert-llama-h5-to-gguf.py : map tensor names * gptneox-main.cpp : map tensor names * gguf : start implementing libllama in GGUF (WIP) * gguf : start implementing libllama in GGUF (WIP) * rm binary committed by mistake * upd .gitignore * gguf : calculate n_mult * gguf : inference with 7B model working (WIP) * gguf : rm deprecated function * gguf : start implementing gguf_file_saver (WIP) * gguf : start implementing gguf_file_saver (WIP) * gguf : start implementing gguf_file_saver (WIP) * gguf : add gguf_get_kv_type * gguf : add gguf_get_kv_type * gguf : write metadata in gguf_file_saver (WIP) * gguf : write metadata in gguf_file_saver (WIP) * gguf : write metadata in gguf_file_saver * gguf : rm references to old file formats * gguf : shorter name for member variable * gguf : rm redundant method * gguf : get rid of n_mult, read n_ff from file * Update gguf_tensor_map.py * Update gptneox-main.cpp * gguf : rm references to old file magics * gguf : start implementing quantization (WIP) * gguf : start implementing quantization (WIP) * gguf : start implementing quantization (WIP) * gguf : start implementing quantization (WIP) * gguf : start implementing quantization (WIP) * gguf : start implementing quantization (WIP) * gguf : quantization is working * gguf : proper closing of file * gguf.py : no need to convert tensors twice * convert-gptneox-h5-to-gguf.py : no need to convert tensors twice * convert-llama-h5-to-gguf.py : no need to convert tensors twice * convert-gptneox-h5-to-gguf.py : simplify nbytes * convert-llama-h5-to-gguf.py : simplify nbytes * gptneox-main.cpp : n_layer --> n_block * constants.py : n_layer --> n_block * gguf.py : n_layer --> n_block * convert-gptneox-h5-to-gguf.py : n_layer --> n_block * convert-llama-h5-to-gguf.py : n_layer --> n_block * gptneox-main.cpp : n_layer --> n_block * Update gguf_tensor_map.py * convert-gptneox-h5-to-gguf.py : load model in parts to save memory * convert-llama-h5-to-gguf.py : load model in parts to save memory * convert : write more metadata for LLaMA * convert : rm quantization version * convert-gptneox-h5-to-gguf.py : add file_type key * gptneox-main.cpp : add file_type key * fix conflicts * gguf : add todos and comments * convert-gptneox-h5-to-gguf.py : tensor name map changes * Create gguf_namemap.py : tensor name map changes * Delete gguf_tensor_map.py * gptneox-main.cpp : tensor name map changes * convert-llama-h5-to-gguf.py : fixes * gguf.py : don't add empty strings * simple : minor style changes * gguf : use UNIX line ending * Create convert-llama-7b-pth-to-gguf.py * llama : sync gguf-llama.cpp with latest llama.cpp (#2608) * llama : sync gguf-llama.cpp with latest llama.cpp * minor : indentation + assert * llama : refactor gguf_buffer and gguf_ctx_buffer * llama : minor * gitignore : add gptneox-main * llama : tokenizer fixes (#2549) * Merge tokenizer fixes into the gguf branch. * Add test vocabularies * convert : update convert-new.py with tokenizer fixes (#2614) * Merge tokenizer fixes into the gguf branch.
* Add test vocabularies * Adapt convert-new.py (and fix a clang-cl compiler error on windows) * llama : sync gguf-llama with llama (#2613) * llama : sync gguf-llama with llama * tests : fix build + warnings (test-tokenizer-1 still fails) * tests : fix wstring_convert * convert : fix layer names * llama : sync gguf-llama.cpp * convert : update HF converter to new tokenizer voodoo magics * llama : update tokenizer style * convert-llama-h5-to-gguf.py : add token types * constants.py : add token types * gguf.py : add token types * convert-llama-7b-pth-to-gguf.py : add token types * gguf-llama.cpp : fix n_head_kv * convert-llama-h5-to-gguf.py : add 70b gqa support * gguf.py : add tensor data layout * convert-llama-h5-to-gguf.py : add tensor data layout * convert-llama-7b-pth-to-gguf.py : add tensor data layout * gptneox-main.cpp : add tensor data layout * convert-llama-h5-to-gguf.py : clarify the reverse permute * llama : refactor model loading code (#2620) * llama : style formatting + remove helper methods * llama : fix quantization using gguf tool * llama : simplify gguf_file_saver * llama : fix method names * llama : simplify write_header() * llama : no need to pass full file loader to the file saver just gguf_ctx * llama : gguf_file_saver write I32 * llama : refactor tensor names (#2622) * gguf: update tensor names searched in quantization * gguf : define tensor names as constants * gguf : initial write API (not tested yet) * gguf : write to file API (not tested) * gguf : initial write API ready + example * gguf : fix header write * gguf : fixes + simplify example + add ggml_nbytes_pad() * gguf : minor * llama : replace gguf_file_saver with new gguf write API * gguf : streaming support when writing files * gguf : remove obsolete write methods * gguf : remove obsolete gguf_get_arr_xxx API * llama : simplify gguf_file_loader * llama : move hparams and vocab from gguf_file_loader to llama_model_loader * llama : merge gguf-util.h in llama.cpp * llama : reorder definitions in .cpp to match .h * llama : minor simplifications * llama : refactor llama_model_loader (WIP) wip : remove ggml_ctx from llama_model_loader wip : merge gguf_file_loader in llama_model_loader * llama : fix shape prints * llama : fix Windows build + fix norm_rms_eps key * llama : throw error on missing KV pairs in model meta data * llama : improve printing + log meta data * llama : switch print order of meta data --------- Co-authored-by: M.
Yusuf Sarıgöz * gguf : deduplicate (#2629) * gguf : better type names * dedup : CPU + Metal is working * ggml : fix warnings about unused results * llama.cpp : fix line feed and compiler warning * llama : fix strncpy warning + note token_to_str does not write null * llama : restore the original load/save session implementation Will migrate this to GGUF in the future * convert-llama-h5-to-gguf.py : support alt ctx param name * ggml : assert when using ggml_mul with non-F32 src1 * examples : dedup simple --------- Co-authored-by: klosax <131523366+klosax@users.noreply.github.com> * gguf.py : merge all files in gguf.py * convert-new.py : pick #2427 for HF 70B support * examples/gguf : no need to keep q option for quantization any more * llama.cpp : print actual model size * llama.cpp : use ggml_elements() * convert-new.py : output gguf (#2635) * convert-new.py : output gguf (WIP) * convert-new.py : add gguf key-value pairs * llama : add hparams.ctx_train + no longer print ftype * convert-new.py : minor fixes * convert-new.py : vocab-only option should work now * llama : fix tokenizer to use llama_char_to_byte * tests : add new ggml-vocab-llama.gguf * convert-new.py : tensor name mapping * convert-new.py : add map for skipping tensor serialization * convert-new.py : convert script now works * gguf.py : pick some of the refactoring from #2644 * convert-new.py : minor fixes * convert.py : update to support GGUF output * Revert "ci : disable CI temporary to not waste energy" This reverts commit 7e82d25f40386540c2c15226300ad998ecd871ea. * convert.py : n_head_kv optional and .gguf file extension * convert.py : better always have n_head_kv and default it to n_head * llama : sync with recent PRs on master * editorconfig : ignore models folder ggml-ci * ci : update ".bin" to ".gguf" extension ggml-ci * llama : fix llama_model_loader memory leak * gptneox : move as a WIP example * llama : fix lambda capture ggml-ci * ggml : fix bug in gguf_set_kv ggml-ci * common.h : .bin --> .gguf * quantize-stats.cpp : .bin --> .gguf * convert.py : fix HF tensor permuting / unpacking ggml-ci * llama.cpp : typo * llama : throw error if gguf fails to init from file ggml-ci * llama : fix tensor name grepping during quantization ggml-ci * gguf.py : write tensors in a single pass (#2644) * gguf : single pass for writing tensors + refactoring writer * gguf : single pass for writing tensors + refactoring writer * gguf : single pass for writing tensors + refactoring writer * gguf : style fixes in simple conversion script * gguf : refactor gptneox conversion script * gguf : rename h5 to hf (for HuggingFace) * gguf : refactor pth to gguf conversion script * gguf : rm file_type key and method * gguf.py : fix vertical alignment * gguf.py : indentation --------- Co-authored-by: Georgi Gerganov * convert-gptneox-hf-to-gguf.py : fixes * gguf.py : gptneox mapping * convert-llama-hf-to-gguf.py : fixes * convert-llama-7b-pth-to-gguf.py : fixes * ggml.h : reverse GGUF_MAGIC * gguf.py : reverse GGUF_MAGIC * test-tokenizer-0.cpp : fix warning * llama.cpp : print kv general.name * llama.cpp : get special token kv and linefeed token id * llama : print number of tensors per type + print arch + style * tests : update vocab file with new magic * editorconfig : fix whitespaces * llama : re-order functions * llama : remove C++ API + reorganize common source in /common dir * llama : minor API updates * llama : avoid hardcoded special tokens * llama : fix MPI build ggml-ci * llama : introduce enum llama_vocab_type + remove hardcoded string 
constants * convert-falcon-hf-to-gguf.py : falcon HF --> gguf conversion, not tested * falcon-main.cpp : falcon inference example * convert-falcon-hf-to-gguf.py : remove extra kv * convert-gptneox-hf-to-gguf.py : remove extra kv * convert-llama-7b-pth-to-gguf.py : remove extra kv * convert-llama-hf-to-gguf.py : remove extra kv * gguf.py : fix for falcon 40b * falcon-main.cpp : fix for falcon 40b * convert-falcon-hf-to-gguf.py : update ref * convert-falcon-hf-to-gguf.py : add tensor data layout * cmpnct_gpt2bpe.hpp : fixes * falcon-main.cpp : fixes * gptneox-main.cpp : fixes * cmpnct_gpt2bpe.hpp : remove non-general stuff * Update examples/server/README.md Co-authored-by: slaren * cmpnct_gpt2bpe.hpp : cleanup * convert-llama-hf-to-gguf.py : special tokens * convert-llama-7b-pth-to-gguf.py : special tokens * convert-permute-debug.py : permute debug print * convert-permute-debug-master.py : permute debug for master * convert-permute-debug.py : change permute type of attn_q * convert.py : 70b model working (change attn_q permute) * Delete convert-permute-debug-master.py * Delete convert-permute-debug.py * convert-llama-hf-to-gguf.py : fix attn_q permute * gguf.py : fix rope scale kv * convert-llama-hf-to-gguf.py : rope scale and added tokens * convert-llama-7b-pth-to-gguf.py : rope scale and added tokens * llama.cpp : use rope scale kv * convert-llama-7b-pth-to-gguf.py : rope scale fix * convert-llama-hf-to-gguf.py : rope scale fix * py : fix whitespace * gguf : add Python script to convert GGMLv3 LLaMA models to GGUF (#2682) * First pass at converting GGMLv3 LLaMA models to GGUF * Cleanups, better output during conversion * Fix vocab space conversion logic * More vocab conversion fixes * Add description to converted GGUF files * Improve help text, expand warning * Allow specifying name and description for output GGUF * Allow overriding vocab and hyperparams from original model metadata * Use correct params override var name * Fix wrong type size for Q8_K Better handling of original style metadata * Set default value for gguf add_tensor raw_shape KW arg * llama : improve token type support (#2668) * Merge tokenizer fixes into the gguf branch. * Add test vocabularies * Adapt convert-new.py (and fix a clang-cl compiler error on windows) * Improved tokenizer test But does it work on MacOS? * Improve token type support - Added @klosax code to convert.py - Improved token type support in vocabulary * Exclude platform dependent tests * More sentencepiece compatibility by eliminating magic numbers * Restored accidentally removed comment * llama : add API for token type ggml-ci * tests : use new tokenizer type API (#2692) * Merge tokenizer fixes into the gguf branch. * Add test vocabularies * Adapt convert-new.py (and fix a clang-cl compiler error on windows) * Improved tokenizer test But does it work on MacOS? * Improve token type support - Added @klosax code to convert.py - Improved token type support in vocabulary * Exclude platform dependent tests * More sentencepiece compatibility by eliminating magic numbers * Restored accidentally removed comment * Improve commentary * Use token type API in test-tokenizer-1.cpp * py : cosmetics * readme : add notice about new file format ggml-ci --------- Co-authored-by: M. 
Yusuf Sarıgöz Co-authored-by: klosax <131523366+klosax@users.noreply.github.com> Co-authored-by: goerch Co-authored-by: slaren Co-authored-by: Kerfuffle <44031344+KerfuffleV2@users.noreply.github.com> --- .gitignore | 4 +- CMakeLists.txt | 13 +- Makefile | 23 +- README.md | 34 +- ci/run.sh | 44 +- common/CMakeLists.txt | 20 + {examples => common}/common.cpp | 105 +- {examples => common}/common.h | 38 +- {examples => common}/console.cpp | 0 {examples => common}/console.h | 0 {examples => common}/grammar-parser.cpp | 0 {examples => common}/grammar-parser.h | 0 convert-falcon-hf-to-gguf.py | 282 ++ convert-gptneox-hf-to-gguf.py | 266 ++ convert-llama-7b-pth-to-gguf.py | 307 ++ convert-llama-ggmlv3-to-gguf.py | 334 ++ convert-llama-hf-to-gguf.py | 327 ++ convert.py | 1026 ++---- docs/token_generation_performance_tips.md | 6 +- examples/CMakeLists.txt | 21 - .../convert-llama2c-to-ggml.cpp | 180 +- examples/embd-input/embd-input-lib.cpp | 6 +- examples/embedding/embedding.cpp | 2 +- examples/gguf/gguf.cpp | 246 ++ examples/gptneox-wip/cmpnct_gpt2bpe.hpp | 1133 ++++++ examples/gptneox-wip/falcon-main.cpp | 1111 ++++++ examples/gptneox-wip/gptneox-main.cpp | 1082 ++++++ examples/llama-bench/llama-bench.cpp | 6 +- examples/main/main.cpp | 39 +- examples/metal/metal.cpp | 2 +- examples/perplexity/perplexity.cpp | 2 +- examples/quantize-stats/quantize-stats.cpp | 2 +- examples/quantize/quantize.cpp | 8 +- examples/save-load-state/save-load-state.cpp | 10 +- examples/server/README.md | 7 +- examples/server/server.cpp | 33 +- examples/simple/simple.cpp | 153 +- .../train-text-from-scratch.cpp | 138 +- ggml-metal.h | 3 + ggml-metal.m | 15 + ggml.c | 1013 +++++- ggml.h | 122 +- gguf.py | 718 ++++ llama-util.h | 553 --- llama.cpp | 3032 ++++++++++------- llama.h | 267 +- models/.editorconfig | 1 + models/ggml-vocab-llama.gguf | Bin 0 -> 595423 bytes models/ggml-vocab.bin | Bin 432610 -> 0 bytes tests/CMakeLists.txt | 41 +- tests/test-grammar-parser.cpp | 3 +- tests/test-llama-grammar.cpp | 6 +- tests/test-tokenizer-0.cpp | 60 +- tests/test-tokenizer-1.cpp | 131 + 54 files changed, 10020 insertions(+), 2955 deletions(-) create mode 100644 common/CMakeLists.txt rename {examples => common}/common.cpp (92%) rename {examples => common}/common.h (88%) rename {examples => common}/console.cpp (100%) rename {examples => common}/console.h (100%) rename {examples => common}/grammar-parser.cpp (100%) rename {examples => common}/grammar-parser.h (100%) create mode 100644 convert-falcon-hf-to-gguf.py create mode 100644 convert-gptneox-hf-to-gguf.py create mode 100644 convert-llama-7b-pth-to-gguf.py create mode 100644 convert-llama-ggmlv3-to-gguf.py create mode 100644 convert-llama-hf-to-gguf.py create mode 100644 examples/gguf/gguf.cpp create mode 100644 examples/gptneox-wip/cmpnct_gpt2bpe.hpp create mode 100644 examples/gptneox-wip/falcon-main.cpp create mode 100644 examples/gptneox-wip/gptneox-main.cpp create mode 100644 gguf.py delete mode 100644 llama-util.h create mode 100644 models/.editorconfig create mode 100644 models/ggml-vocab-llama.gguf delete mode 100644 models/ggml-vocab.bin create mode 100644 tests/test-tokenizer-1.cpp diff --git a/.gitignore b/.gitignore index 9c749f1ef..a4df837a4 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ *.o *.a *.so +*.gguf *.bin .DS_Store .build/ @@ -47,6 +48,8 @@ models-mnt /server /Pipfile /embd-input-test +/gguf +/gguf-llama-simple /libllama.so /llama-bench build-info.h @@ -65,7 +68,6 @@ perf-*.txt examples/jeopardy/results.txt - pyproject.toml poetry.lock 
poetry.toml diff --git a/CMakeLists.txt b/CMakeLists.txt index 824d9f2cf..bb63ef98e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -497,9 +497,11 @@ else() endif() # -# Build libraries +# libraries # +# ggml + add_library(ggml OBJECT ggml.c ggml.h @@ -524,10 +526,11 @@ if (BUILD_SHARED_LIBS) install(TARGETS ggml_shared LIBRARY) endif() +# llama + add_library(llama llama.cpp llama.h - llama-util.h ) target_include_directories(llama PUBLIC .) @@ -546,6 +549,10 @@ if (BUILD_SHARED_LIBS) install(TARGETS llama LIBRARY) endif() +# +# install +# + include(GNUInstallDirs) install( FILES convert.py @@ -584,6 +591,8 @@ endif() # programs, examples and tests # +add_subdirectory(common) + if (LLAMA_BUILD_TESTS AND NOT CMAKE_JS_VERSION) include(CTest) add_subdirectory(tests) diff --git a/Makefile b/Makefile index 502781c69..d31acc450 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ # Define the default target now so that it is always the first target -BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch convert-llama2c-to-ggml simple server embd-input-test llama-bench +BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch convert-llama2c-to-ggml simple server embd-input-test gguf llama-bench # Binaries only useful for tests TEST_TARGETS = tests/test-llama-grammar tests/test-grammar-parser tests/test-double-float tests/test-grad0 tests/test-opt tests/test-quantize-fns tests/test-quantize-perf tests/test-sampling tests/test-tokenizer-0 @@ -45,8 +45,8 @@ OPT = -Ofast else OPT = -O3 endif -CFLAGS = -I. $(OPT) -std=c11 -fPIC -CXXFLAGS = -I. -I./examples $(OPT) -std=c++11 -fPIC +CFLAGS = -I. $(OPT) -std=c11 -fPIC +CXXFLAGS = -I. -I./common $(OPT) -std=c++11 -fPIC LDFLAGS = ifdef LLAMA_DEBUG @@ -329,23 +329,23 @@ ggml-alloc.o: ggml-alloc.c ggml.h ggml-alloc.h OBJS += ggml-alloc.o -llama.o: llama.cpp ggml.h ggml-alloc.h ggml-cuda.h ggml-metal.h llama.h llama-util.h +llama.o: llama.cpp ggml.h ggml-alloc.h ggml-cuda.h ggml-metal.h llama.h $(CXX) $(CXXFLAGS) -c $< -o $@ -common.o: examples/common.cpp examples/common.h +common.o: common/common.cpp common/common.h $(CXX) $(CXXFLAGS) -c $< -o $@ -console.o: examples/console.cpp examples/console.h +console.o: common/console.cpp common/console.h $(CXX) $(CXXFLAGS) -c $< -o $@ -grammar-parser.o: examples/grammar-parser.cpp examples/grammar-parser.h +grammar-parser.o: common/grammar-parser.cpp common/grammar-parser.h $(CXX) $(CXXFLAGS) -c $< -o $@ libllama.so: llama.o ggml.o $(OBJS) $(CXX) $(CXXFLAGS) -shared -fPIC -o $@ $^ $(LDFLAGS) clean: - rm -vf *.o *.so *.dll main quantize quantize-stats perplexity embedding benchmark-matmult save-load-state server simple vdot train-text-from-scratch convert-llama2c-to-ggml embd-input-test llama-bench build-info.h $(TEST_TARGETS) + rm -vf *.o *.so *.dll main quantize quantize-stats perplexity embedding benchmark-matmult save-load-state server simple vdot train-text-from-scratch convert-llama2c-to-ggml embd-input-test gguf llama-bench build-info.h $(TEST_TARGETS) # # Examples @@ -385,7 +385,10 @@ $(LIB_PRE)embdinput$(DSO_EXT): examples/embd-input/embd-input.h examples/embd-in embd-input-test: $(LIB_PRE)embdinput$(DSO_EXT) examples/embd-input/embd-input-test.cpp build-info.h ggml.o llama.o common.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %$(DSO_EXT),$(filter-out %.h,$(filter-out %.hpp,$^))) -o $@ $(LDFLAGS) -L. 
-lembdinput -train-text-from-scratch: examples/train-text-from-scratch/train-text-from-scratch.cpp build-info.h ggml.o llama.o $(OBJS) +gguf: examples/gguf/gguf.cpp build-info.h ggml.o llama.o $(OBJS) + $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) + +train-text-from-scratch: examples/train-text-from-scratch/train-text-from-scratch.cpp build-info.h ggml.o llama.o common.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS) convert-llama2c-to-ggml: examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp build-info.h ggml.o llama.o $(OBJS) @@ -418,7 +421,7 @@ vdot: pocs/vdot/vdot.cpp ggml.o $(OBJS) tests/test-llama-grammar: tests/test-llama-grammar.cpp build-info.h ggml.o llama.o common.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS) -tests/test-grammar-parser: tests/test-grammar-parser.cpp examples/grammar-parser.cpp build-info.h ggml.o llama.o common.o $(OBJS) +tests/test-grammar-parser: tests/test-grammar-parser.cpp build-info.h ggml.o llama.o common.o $(OBJS) $(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS) tests/test-double-float: tests/test-double-float.cpp build-info.h ggml.o llama.o common.o $(OBJS) diff --git a/README.md b/README.md index 9f8512dc5..82e070ac3 100644 --- a/README.md +++ b/README.md @@ -9,11 +9,17 @@ Inference of [LLaMA](https://arxiv.org/abs/2302.13971) model in pure C/C++ -### 🚧 Incoming breaking change + refactoring: +### Hot topics -See PR https://github.com/ggerganov/llama.cpp/pull/2398 for more info. +A new file format has been introduced: [GGUF](https://github.com/ggerganov/llama.cpp/pull/2398) -To devs: avoid making big changes to `llama.h` / `llama.cpp` until merged +Last revision compatible with the old format: [dadbed9](https://github.com/ggerganov/llama.cpp/commit/dadbed99e65252d79f81101a392d0d6497b86caa) + +### Current `master` should be considered in Beta - expect some issues for a few days! + +### Be prepared to re-convert and / or re-quantize your GGUF models while this notice is up! + +### Issues with non-GGUF models will be considered with low priority! ---- @@ -291,7 +297,7 @@ When built with Metal support, you can enable GPU inference with the `--gpu-laye Any value larger than 0 will offload the computation to the GPU. For example: ```bash -./main -m ./models/7B/ggml-model-q4_0.bin -n 128 -ngl 1 +./main -m ./models/7B/ggml-model-q4_0.gguf -n 128 -ngl 1 ``` ### MPI Build @@ -330,7 +336,7 @@ The above will distribute the computation across 2 processes on the first host a Finally, you're ready to run a computation using `mpirun`: ```bash -mpirun -hostfile hostfile -n 3 ./main -m ./models/7B/ggml-model-q4_0.bin -n 128 +mpirun -hostfile hostfile -n 3 ./main -m ./models/7B/ggml-model-q4_0.gguf -n 128 ``` ### BLAS Build @@ -513,10 +519,10 @@ python3 convert.py models/7B/ python convert.py models/7B/ --vocabtype bpe # quantize the model to 4-bits (using q4_0 method) -./quantize ./models/7B/ggml-model-f16.bin ./models/7B/ggml-model-q4_0.bin q4_0 +./quantize ./models/7B/ggml-model-f16.gguf ./models/7B/ggml-model-q4_0.gguf q4_0 # run the inference -./main -m ./models/7B/ggml-model-q4_0.bin -n 128 +./main -m ./models/7B/ggml-model-q4_0.gguf -n 128 ``` When running the larger models, make sure you have enough disk space to store all the intermediate files. 
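As a quick sanity check after conversion or quantization, you can inspect the GGUF header directly. Below is a minimal sketch (not part of this PR) that assumes the v1 header layout introduced here: a 4-byte `GGUF` magic followed by three little-endian `uint32` fields for the format version, the tensor count, and the number of key-value metadata pairs.

```python
import struct

# Peek at a GGUF v1 header; the field layout is assumed from this PR.
with open("models/7B/ggml-model-q4_0.gguf", "rb") as f:
    magic = f.read(4)
    assert magic == b"GGUF", f"not a GGUF file: {magic!r}"
    version, n_tensors, n_kv = struct.unpack("<III", f.read(12))
    print(f"gguf: version={version}, tensors={n_tensors}, kv pairs={n_kv}")
```

If the magic or the counts look wrong, the file most likely still uses the old GGML format and needs to be re-converted.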
@@ -572,7 +578,7 @@ Here is an example of a few-shot interaction, invoked with the command ./examples/chat-13B.sh # custom arguments using a 13B model -./main -m ./models/13B/ggml-model-q4_0.bin -n 256 --repeat_penalty 1.0 --color -i -r "User:" -f prompts/chat-with-bob.txt +./main -m ./models/13B/ggml-model-q4_0.gguf -n 256 --repeat_penalty 1.0 --color -i -r "User:" -f prompts/chat-with-bob.txt ``` Note the use of `--color` to distinguish between user input and generated text. Other parameters are explained in more detail in the [README](examples/main/README.md) for the `main` example program. @@ -635,6 +641,8 @@ OpenLLaMA is an openly licensed reproduction of Meta's original LLaMA model. It ### Using [GPT4All](https://github.com/nomic-ai/gpt4all) +*Note: these instructions are likely obsoleted by the GGUF update* + - Obtain the `tokenizer.model` file from LLaMA model and put it to `models` - Obtain the `added_tokens.json` file from Alpaca model and put it to `models` - Obtain the `gpt4all-lora-quantized.bin` file from GPT4All model and put it to `models/gpt4all-7B` @@ -710,7 +718,7 @@ If your issue is with model generation quality, then please at least scan the fo #### How to run 1. Download/extract: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research -2. Run `./perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw` +2. Run `./perplexity -m models/7B/ggml-model-q4_0.gguf -f wiki.test.raw` 3. Output: ``` perplexity : calculating perplexity over 655 chunks @@ -809,13 +817,13 @@ docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:full --all-in- On completion, you are ready to play! ```bash -docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:full --run -m /models/7B/ggml-model-q4_0.bin -p "Building a website can be done in 10 simple steps:" -n 512 +docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:full --run -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 ``` or with a light image: ```bash -docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:light -m /models/7B/ggml-model-q4_0.bin -p "Building a website can be done in 10 simple steps:" -n 512 +docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:light -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 ``` ### Docker With CUDA @@ -846,8 +854,8 @@ The resulting images, are essentially the same as the non-CUDA images: After building locally, Usage is similar to the non-CUDA examples, but you'll need to add the `--gpus` flag. You will also want to use the `--n-gpu-layers` flag. 
```bash -docker run --gpus all -v /path/to/models:/models local/llama.cpp:full-cuda --run -m /models/7B/ggml-model-q4_0.bin -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1 -docker run --gpus all -v /path/to/models:/models local/llama.cpp:light-cuda -m /models/7B/ggml-model-q4_0.bin -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1 +docker run --gpus all -v /path/to/models:/models local/llama.cpp:full-cuda --run -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1 +docker run --gpus all -v /path/to/models:/models local/llama.cpp:light-cuda -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1 ``` ### Contributing diff --git a/ci/run.sh b/ci/run.sh index 8dc394964..54ba6d710 100644 --- a/ci/run.sh +++ b/ci/run.sh @@ -159,17 +159,17 @@ function gg_run_open_llama_3b_v2 { python3 ../convert.py ${path_models} - model_f16="${path_models}/ggml-model-f16.bin" - model_q8_0="${path_models}/ggml-model-q8_0.bin" - model_q4_0="${path_models}/ggml-model-q4_0.bin" - model_q4_1="${path_models}/ggml-model-q4_1.bin" - model_q5_0="${path_models}/ggml-model-q5_0.bin" - model_q5_1="${path_models}/ggml-model-q5_1.bin" - model_q2_k="${path_models}/ggml-model-q2_k.bin" - model_q3_k="${path_models}/ggml-model-q3_k.bin" - model_q4_k="${path_models}/ggml-model-q4_k.bin" - model_q5_k="${path_models}/ggml-model-q5_k.bin" - model_q6_k="${path_models}/ggml-model-q6_k.bin" + model_f16="${path_models}/ggml-model-f16.gguf" + model_q8_0="${path_models}/ggml-model-q8_0.gguf" + model_q4_0="${path_models}/ggml-model-q4_0.gguf" + model_q4_1="${path_models}/ggml-model-q4_1.gguf" + model_q5_0="${path_models}/ggml-model-q5_0.gguf" + model_q5_1="${path_models}/ggml-model-q5_1.gguf" + model_q2_k="${path_models}/ggml-model-q2_k.gguf" + model_q3_k="${path_models}/ggml-model-q3_k.gguf" + model_q4_k="${path_models}/ggml-model-q4_k.gguf" + model_q5_k="${path_models}/ggml-model-q5_k.gguf" + model_q6_k="${path_models}/ggml-model-q6_k.gguf" wiki_test_60="${path_wiki}/wiki.test-60.raw" @@ -285,17 +285,17 @@ function gg_run_open_llama_7b_v2 { python3 ../convert.py ${path_models} - model_f16="${path_models}/ggml-model-f16.bin" - model_q8_0="${path_models}/ggml-model-q8_0.bin" - model_q4_0="${path_models}/ggml-model-q4_0.bin" - model_q4_1="${path_models}/ggml-model-q4_1.bin" - model_q5_0="${path_models}/ggml-model-q5_0.bin" - model_q5_1="${path_models}/ggml-model-q5_1.bin" - model_q2_k="${path_models}/ggml-model-q2_k.bin" - model_q3_k="${path_models}/ggml-model-q3_k.bin" - model_q4_k="${path_models}/ggml-model-q4_k.bin" - model_q5_k="${path_models}/ggml-model-q5_k.bin" - model_q6_k="${path_models}/ggml-model-q6_k.bin" + model_f16="${path_models}/ggml-model-f16.gguf" + model_q8_0="${path_models}/ggml-model-q8_0.gguf" + model_q4_0="${path_models}/ggml-model-q4_0.gguf" + model_q4_1="${path_models}/ggml-model-q4_1.gguf" + model_q5_0="${path_models}/ggml-model-q5_0.gguf" + model_q5_1="${path_models}/ggml-model-q5_1.gguf" + model_q2_k="${path_models}/ggml-model-q2_k.gguf" + model_q3_k="${path_models}/ggml-model-q3_k.gguf" + model_q4_k="${path_models}/ggml-model-q4_k.gguf" + model_q5_k="${path_models}/ggml-model-q5_k.gguf" + model_q6_k="${path_models}/ggml-model-q6_k.gguf" wiki_test="${path_wiki}/wiki.test.raw" diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt new file mode 100644 index 000000000..dead56118 --- /dev/null +++ b/common/CMakeLists.txt @@ -0,0 +1,20 
@@ +# common + +set(TARGET common) + +add_library(${TARGET} OBJECT + common.h + common.cpp + console.h + console.cpp + grammar-parser.h + grammar-parser.cpp + ) + +if (BUILD_SHARED_LIBS) + set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON) +endif() + +target_include_directories(${TARGET} PUBLIC .) +target_compile_features(${TARGET} PUBLIC cxx_std_11) +target_link_libraries(${TARGET} PRIVATE llama) diff --git a/examples/common.cpp b/common/common.cpp similarity index 92% rename from examples/common.cpp rename to common/common.cpp index bd39d9220..d7e1a5725 100644 --- a/examples/common.cpp +++ b/common/common.cpp @@ -170,18 +170,6 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) { break; } params.n_ctx = std::stoi(argv[i]); - } else if (arg == "-gqa" || arg == "--gqa") { - if (++i >= argc) { - invalid_param = true; - break; - } - params.n_gqa = std::stoi(argv[i]); - } else if (arg == "-eps" || arg == "--rms-norm-eps") { - if (++i >= argc) { - invalid_param = true; - break; - } - params.rms_norm_eps = std::stof(argv[i]); } else if (arg == "--rope-freq-base") { if (++i >= argc) { invalid_param = true; @@ -439,7 +427,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) { } params.hellaswag_tasks = std::stoi(argv[i]); } else if (arg == "--ignore-eos") { - params.logit_bias[llama_token_eos()] = -INFINITY; + params.ignore_eos = true; } else if (arg == "--no-penalize-nl") { params.penalize_nl = false; } else if (arg == "-l" || arg == "--logit-bias") { @@ -561,8 +549,6 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { fprintf(stdout, " -n N, --n-predict N number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)\n", params.n_predict); fprintf(stdout, " -c N, --ctx-size N size of the prompt context (default: %d)\n", params.n_ctx); fprintf(stdout, " -b N, --batch-size N batch size for prompt processing (default: %d)\n", params.n_batch); - fprintf(stdout, " -gqa N, --gqa N grouped-query attention factor (TEMP!!! use 8 for LLaMAv2 70B) (default: %d)\n", params.n_gqa); - fprintf(stdout, " -eps N, --rms-norm-eps N rms norm eps (TEMP!!! 
use 1e-5 for LLaMAv2) (default: %.1e)\n", params.rms_norm_eps); fprintf(stdout, " --top-k N top-k sampling (default: %d, 0 = disabled)\n", params.top_k); fprintf(stdout, " --top-p N top-p sampling (default: %.1f, 1.0 = disabled)\n", (double)params.top_p); fprintf(stdout, " --tfs N tail free sampling, parameter z (default: %.1f, 1.0 = disabled)\n", (double)params.tfs_z); @@ -650,24 +636,15 @@ std::string gpt_random_prompt(std::mt19937 & rng) { return "The"; } -// TODO: not great allocating this every time -std::vector llama_tokenize(struct llama_context * ctx, const std::string & text, bool add_bos) { - // initialize to prompt numer of chars, since n_tokens <= n_prompt_chars - std::vector res(text.size() + (int) add_bos); - const int n = llama_tokenize(ctx, text.c_str(), res.data(), res.size(), add_bos); - assert(n >= 0); - res.resize(n); - - return res; -} +// +// Model utils +// struct llama_context_params llama_context_params_from_gpt_params(const gpt_params & params) { auto lparams = llama_context_default_params(); lparams.n_ctx = params.n_ctx; lparams.n_batch = params.n_batch; - lparams.n_gqa = params.n_gqa; - lparams.rms_norm_eps = params.rms_norm_eps; lparams.n_gpu_layers = params.n_gpu_layers; lparams.main_gpu = params.main_gpu; lparams.tensor_split = params.tensor_split; @@ -685,7 +662,7 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param return lparams; } -std::tuple llama_init_from_gpt_params(const gpt_params & params) { +std::tuple llama_init_from_gpt_params(gpt_params & params) { auto lparams = llama_context_params_from_gpt_params(params); llama_model * model = llama_load_model_from_file(params.model.c_str(), lparams); @@ -714,5 +691,77 @@ std::tuple llama_init_from_gpt_par } } + if (params.ignore_eos) { + params.logit_bias[llama_token_eos(lctx)] = -INFINITY; + } + return std::make_tuple(model, lctx); } + +// +// Vocab utils +// + +std::vector llama_tokenize( + struct llama_context * ctx, + const std::string & text, + bool add_bos) { + // upper limit for the number of tokens + int n_tokens = text.length() + add_bos; + std::vector result(n_tokens); + n_tokens = llama_tokenize(ctx, text.c_str(), result.data(), result.size(), add_bos); + if (n_tokens < 0) { + result.resize(-n_tokens); + int check = llama_tokenize(ctx, text.c_str(), result.data(), result.size(), add_bos); + GGML_ASSERT(check == -n_tokens); + } else { + result.resize(n_tokens); + } + return result; +} + +std::string llama_token_to_str(const struct llama_context * ctx, llama_token token) { + std::vector result(8, 0); + const int n_tokens = llama_token_to_str(ctx, token, result.data(), result.size()); + if (n_tokens < 0) { + result.resize(-n_tokens); + int check = llama_token_to_str(ctx, token, result.data(), result.size()); + GGML_ASSERT(check == -n_tokens); + } else { + result.resize(n_tokens); + } + + return std::string(result.data(), result.size()); +} + +std::vector llama_tokenize_bpe( + struct llama_context * ctx, + const std::string & text, + bool add_bos) { + int n_tokens = text.length() + add_bos; + std::vector result(n_tokens); + n_tokens = llama_tokenize_bpe(ctx, text.c_str(), result.data(), result.size(), add_bos); + if (n_tokens < 0) { + result.resize(-n_tokens); + int check = llama_tokenize_bpe(ctx, text.c_str(), result.data(), result.size(), add_bos); + GGML_ASSERT(check == -n_tokens); + } else { + result.resize(n_tokens); + } + return result; +} + +std::string llama_token_to_str_bpe(const struct llama_context * ctx, llama_token token) { + std::vector result(8, 0); + 
const int n_tokens = llama_token_to_str_bpe(ctx, token, result.data(), result.size()); + if (n_tokens < 0) { + result.resize(-n_tokens); + const int check = llama_token_to_str_bpe(ctx, token, result.data(), result.size()); + GGML_ASSERT(check == -n_tokens); + } else { + result.resize(n_tokens); + } + + return std::string(result.data(), result.size()); +} + diff --git a/examples/common.h b/common/common.h similarity index 88% rename from examples/common.h rename to common/common.h index 375bc0a3d..c50a6edfc 100644 --- a/examples/common.h +++ b/common/common.h @@ -22,19 +22,16 @@ struct gpt_params { int32_t n_predict = -1; // new tokens to predict int32_t n_ctx = 512; // context size int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS) - int32_t n_gqa = 1; // grouped-query attention factor (TODO: move to hparams) int32_t n_keep = 0; // number of tokens to keep from initial prompt int32_t n_chunks = -1; // max number of chunks to process (-1 = unlimited) int32_t n_gpu_layers = 0; // number of layers to store in VRAM int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors float tensor_split[LLAMA_MAX_DEVICES] = {0}; // how split tensors should be distributed across GPUs int32_t n_probs = 0; // if greater than 0, output the probabilities of top n_probs tokens. - float rms_norm_eps = LLAMA_DEFAULT_RMS_EPS; // rms norm epsilon float rope_freq_base = 10000.0f; // RoPE base frequency float rope_freq_scale = 1.0f; // RoPE frequency scaling factor // sampling parameters - std::unordered_map logit_bias; // logit bias for specific tokens int32_t top_k = 40; // <= 0 to use vocab size float top_p = 0.95f; // 1.0 = disabled float tfs_z = 1.00f; // 1.0 = disabled @@ -48,12 +45,14 @@ struct gpt_params { float mirostat_tau = 5.00f; // target entropy float mirostat_eta = 0.10f; // learning rate + std::unordered_map logit_bias; // logit bias for specific tokens + // Classifier-Free Guidance // https://arxiv.org/abs/2306.17806 std::string cfg_negative_prompt; // string to help guidance float cfg_scale = 1.f; // How strong is guidance - std::string model = "models/7B/ggml-model.bin"; // model path + std::string model = "models/7B/ggml-model-f16.gguf"; // model path std::string model_alias = "unknown"; // model alias std::string prompt = ""; std::string path_prompt_cache = ""; // path to file for saving/loading prompt eval state @@ -83,6 +82,7 @@ struct gpt_params { bool simple_io = false; // improves compatibility with subprocesses and limited consoles bool input_prefix_bos = false; // prefix BOS to user inputs, preceding input_prefix + bool ignore_eos = false; // ignore generated EOS tokens bool instruct = false; // instruction mode (used for Alpaca models) bool penalize_nl = true; // consider newlines as a repeatable token bool perplexity = false; // compute perplexity over the prompt @@ -100,15 +100,31 @@ void gpt_print_usage(int argc, char ** argv, const gpt_params & params); std::string gpt_random_prompt(std::mt19937 & rng); -// -// Vocab utils -// - -std::vector llama_tokenize(struct llama_context * ctx, const std::string & text, bool add_bos); - // // Model utils // -std::tuple llama_init_from_gpt_params(const gpt_params & params); +std::tuple llama_init_from_gpt_params(gpt_params & params); struct llama_context_params llama_context_params_from_gpt_params(const gpt_params & params); + +// +// Vocab utils +// + +std::vector llama_tokenize( + struct llama_context * ctx, + const std::string & text, + bool add_bos); + +std::vector llama_tokenize_bpe( + 
struct llama_context * ctx, + const std::string & text, + bool add_bos); + +std::string llama_token_to_str( + const struct llama_context * ctx, + llama_token token); + +std::string llama_token_to_str_bpe( + const struct llama_context * ctx, + llama_token token); diff --git a/examples/console.cpp b/common/console.cpp similarity index 100% rename from examples/console.cpp rename to common/console.cpp diff --git a/examples/console.h b/common/console.h similarity index 100% rename from examples/console.h rename to common/console.h diff --git a/examples/grammar-parser.cpp b/common/grammar-parser.cpp similarity index 100% rename from examples/grammar-parser.cpp rename to common/grammar-parser.cpp diff --git a/examples/grammar-parser.h b/common/grammar-parser.h similarity index 100% rename from examples/grammar-parser.h rename to common/grammar-parser.h diff --git a/convert-falcon-hf-to-gguf.py b/convert-falcon-hf-to-gguf.py new file mode 100644 index 000000000..b3e190a0f --- /dev/null +++ b/convert-falcon-hf-to-gguf.py @@ -0,0 +1,282 @@ +# HF falcon--> gguf conversion + +import gguf +import os +import sys +import struct +import json +import numpy as np +import torch + +from typing import Any, List +from pathlib import Path +from transformers import AutoTokenizer + +def bytes_to_unicode(): + # ref: https://github.com/openai/gpt-2/blob/master/src/encoder.py + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. + This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. + When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. + This is a significant percentage of your normal, say, 32K bpe vocab. + To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + And avoids mapping to whitespace/control characters the bpe code barfs on. 
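+ Illustrative example (added commentary, not in the original script):
+ >>> bytes_to_unicode()[32]
+ 'Ġ'
+ The space byte 0x20 falls outside the printable ranges kept in `bs`, so it
+ is remapped to chr(256 + 32) == 'Ġ', the word-boundary marker familiar
+ from GPT-2 BPE merge lists.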
+ """ + bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8+n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def count_model_parts(dir_model: str) -> int: + num_parts = 0 + for filename in os.listdir(dir_model): + if filename.startswith("pytorch_model-"): + num_parts += 1 + + if num_parts > 0: + print("gguf: found " + str(num_parts) + " model parts") + return num_parts + + +if len(sys.argv) < 3: + print("Usage: convert-h5-to-ggml.py dir-model ftype\n") + print(" ftype == 0 -> float32") + print(" ftype == 1 -> float16") + sys.exit(1) + + +# output in the same directory as the model +dir_model = sys.argv[1] +last_dir = os.path.basename(os.path.normpath(dir_model)) + +# possible tensor data types +# ftype == 0 -> float32 +# ftype == 1 -> float16 + +# map from ftype to string +ftype_str = ["f32", "f16"] + +ftype = 1 +if len(sys.argv) > 2: + ftype = int(sys.argv[2]) + if ftype < 0 or ftype > 1: + print("Invalid ftype: " + str(ftype)) + + sys.exit(1) + +fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".gguf" + +print("gguf: loading model "+last_dir) + +with open(dir_model + "/config.json", "r", encoding="utf-8") as f: + hparams = json.load(f) + +if hparams["architectures"][0] != "RWForCausalLM": + print("Model architecture not supported: " + hparams["architectures"][0]) + + sys.exit() + +# get number of model parts +num_parts = count_model_parts(dir_model) + +ARCH=gguf.MODEL_ARCH.FALCON +gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH]) + +print("gguf: get model metadata") + +block_count = hparams["n_layer"] + +gguf_writer.add_name(last_dir) +gguf_writer.add_context_length(2048) # not in config.json +gguf_writer.add_tensor_data_layout("jploski") # qkv tensor transform +gguf_writer.add_embedding_length(hparams["hidden_size"]) +gguf_writer.add_feed_forward_length(4 * hparams["hidden_size"]) +gguf_writer.add_block_count(block_count) +gguf_writer.add_head_count(hparams["n_head"]) +if "n_head_kv" in hparams: gguf_writer.add_head_count_kv(hparams["n_head_kv"]) +gguf_writer.add_layer_norm_eps(hparams["layer_norm_epsilon"]) + +# TOKENIZATION + +print("gguf: get tokenizer metadata") + +tokens: List[str] = [] +merges: List[str] = [] + + +if Path(dir_model + "/tokenizer.json").is_file(): + # gpt2 tokenizer + gguf_writer.add_tokenizer_model("gpt2") + + print("gguf: get gpt2 tokenizer merges") + + with open(dir_model + "/tokenizer.json", "r", encoding="utf-8") as f: + tokenizer_json = json.load(f) + merges = tokenizer_json["model"]["merges"] + + gguf_writer.add_token_merges(merges) + + print("gguf: get gpt2 tokenizer vocab") + + vocab_size = len(tokenizer_json["model"]["vocab"]) + + # ref: https://github.com/cmp-nct/ggllm.cpp/blob/master/falcon_convert.py + tokenizer = AutoTokenizer.from_pretrained(dir_model) + + reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()} + byte_encoder = bytes_to_unicode() + byte_decoder = {v: k for k, v in byte_encoder.items()} + + for i in range(vocab_size): + if i in reverse_vocab: + try: + text = bytearray([byte_decoder[c] for c in reverse_vocab[i]]) + except KeyError: + text = bytearray() + for c in reverse_vocab[i]: + if ord(c) < 256: # single byte character + text.append(byte_decoder[ord(c)]) + else: # multibyte special token character + text.extend(c.encode('utf-8')) + else: + print(f"Key {i} not in tokenizer vocabulary. 
Padding with an arbitrary token.") + pad_token = f"[PAD{i}]".encode("utf8") + text = bytearray(pad_token) + + tokens.append(text) + + gguf_writer.add_token_list(tokens) + + if "added_tokens" in tokenizer_json and Path(dir_model + "/tokenizer_config.json").is_file(): + print("gguf: get special token ids") + + with open(dir_model + "/tokenizer_config.json", "r", encoding="utf-8") as f: + tokenizer_config = json.load(f) + + # find special token ids + + if "bos_token" in tokenizer_config: + for key in tokenizer_json["added_tokens"]: + if key["content"] == tokenizer_config["bos_token"]: + gguf_writer.add_bos_token_id(key["id"]) + + if "eos_token" in tokenizer_config: + for key in tokenizer_json["added_tokens"]: + if key["content"] == tokenizer_config["eos_token"]: + gguf_writer.add_eos_token_id(key["id"]) + + if "unk_token" in tokenizer_config: + for key in tokenizer_json["added_tokens"]: + if key["content"] == tokenizer_config["unk_token"]: + gguf_writer.add_unk_token_id(key["id"]) + + if "sep_token" in tokenizer_config: + for key in tokenizer_json["added_tokens"]: + if key["content"] == tokenizer_config["sep_token"]: + gguf_writer.add_sep_token_id(key["id"]) + + if "pad_token" in tokenizer_config: + for key in tokenizer_json["added_tokens"]: + if key["content"] == tokenizer_config["pad_token"]: + gguf_writer.add_pad_token_id(key["id"]) + + +# TENSORS + +tensor_map = gguf.get_tensor_name_map(ARCH,block_count) + +# params for qkv transform +n_head = hparams["n_head"] +n_head_kv = hparams["n_head_kv"] if "n_head_kv" in hparams else 1 +head_dim = hparams["hidden_size"] // n_head + +# tensor info +print("gguf: get tensor metadata") + +if num_parts == 0: + part_names = ("pytorch_model.bin",) +else: + part_names = ( + f"pytorch_model-{n:05}-of-{num_parts:05}.bin" for n in range(1, num_parts + 1) + ) + +for part_name in part_names: + print("gguf: loading model part '" + part_name + "'") + model_part = torch.load(f"{dir_model}/{part_name}", map_location="cpu") + + for name in model_part.keys(): + data = model_part[name] + + old_dtype = data.dtype + + # convert any unsupported data types to float32 + if data.dtype != torch.float16 and data.dtype != torch.float32: + data = data.to(torch.float32) + + # QKV tensor transform + # The original query_key_value tensor contains n_head_kv "kv groups", + # each consisting of n_head/n_head_kv query weights followed by one key + # and one value weight (shared by all query heads in the kv group). + # This layout makes it a big pain to work with in GGML. + # So we rearrange them here,, so that we have n_head query weights + # followed by n_head_kv key weights followed by n_head_kv value weights, + # in contiguous fashion. 
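+ # A tiny worked example (added commentary, not part of the original patch):
+ # with n_head = 4 and n_head_kv = 2, the incoming row order per kv group is
+ # [q0 q1 k0 v0 | q2 q3 k1 v1], and after the rearrangement below it becomes
+ # [q0 q1 q2 q3 k0 k1 v0 v1]: all queries, then all keys, then all values.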
+ # ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py + + if "query_key_value" in name: + qkv = data.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head) + q = qkv[:, :-2 ].reshape(n_head * head_dim, head_dim * n_head) + k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head) + v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head) + data = torch.cat((q,k,v)).reshape_as(data) + + data = data.squeeze().numpy() + + # map tensor names + if name.endswith(".weight") and name[:-7] in tensor_map: + name = tensor_map[name[:-7]] + ".weight" + elif name.endswith(".bias") and name[:-5] in tensor_map: + name = tensor_map[name[:-5]] + ".bias" + else: + print("Can not map tensor '" + name + "'") + sys.exit() + + n_dims = len(data.shape) + data_dtype = data.dtype + + # if f32 desired, convert any float16 to float32 + if ftype == 0 and data_dtype == np.float16: + data = data.astype(np.float32) + + # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32 + if ftype == 1 and data_dtype == np.float16 and n_dims == 1: + data = data.astype(np.float32) + + # if f16 desired, convert any float32 2-dim weight tensors to float16 + if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: + data = data.astype(np.float16) + + print(name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype)) + + gguf_writer.add_tensor(name, data) + + +print("gguf: write header") +gguf_writer.write_header_to_file() +print("gguf: write metadata") +gguf_writer.write_kv_data_to_file() +print("gguf: write tensors") +gguf_writer.write_tensors_to_file() + +gguf_writer.close() + +print("gguf: model successfully exported to '" + fname_out + "'") +print("") diff --git a/convert-gptneox-hf-to-gguf.py b/convert-gptneox-hf-to-gguf.py new file mode 100644 index 000000000..a7cefc6f3 --- /dev/null +++ b/convert-gptneox-hf-to-gguf.py @@ -0,0 +1,266 @@ +# HF gptneox--> gguf conversion + +import gguf +import os +import sys +import struct +import json +import numpy as np +import torch + +from typing import Any, List +from pathlib import Path +from transformers import AutoTokenizer + +# ref: https://github.com/openai/gpt-2/blob/master/src/encoder.py + + +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a corresponding list of unicode strings. + The reversible bpe codes work on unicode strings. + This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. + When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. + This is a significant percentage of your normal, say, 32K bpe vocab. + To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + And avoids mapping to whitespace/control characters the bpe code barfs on. 
+ """ + bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8+n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def count_model_parts(dir_model: str) -> int: + num_parts = 0 + for filename in os.listdir(dir_model): + if filename.startswith("pytorch_model-"): + num_parts += 1 + + if num_parts > 0: + print("gguf: found " + str(num_parts) + " model parts") + return num_parts + + +if len(sys.argv) < 3: + print("Usage: convert-h5-to-ggml.py dir-model ftype\n") + print(" ftype == 0 -> float32") + print(" ftype == 1 -> float16") + sys.exit(1) + + +# output in the same directory as the model +dir_model = sys.argv[1] +last_dir = os.path.basename(os.path.normpath(dir_model)) + +# possible tensor data types +# ftype == 0 -> float32 +# ftype == 1 -> float16 + +# map from ftype to string +ftype_str = ["f32", "f16"] + +ftype = 1 +if len(sys.argv) > 2: + ftype = int(sys.argv[2]) + if ftype < 0 or ftype > 1: + print("Invalid ftype: " + str(ftype)) + + sys.exit(1) + +fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".gguf" + +print("gguf: loading model "+last_dir) + +with open(dir_model + "/config.json", "r", encoding="utf-8") as f: + hparams = json.load(f) + +if hparams["architectures"][0] != "GPTNeoXForCausalLM": + print("Model architecture not supported: " + hparams["architectures"][0]) + + sys.exit() + +# get number of model parts +num_parts = count_model_parts(dir_model) + +ARCH=gguf.MODEL_ARCH.GPTNEOX +gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH]) + +print("gguf: get model metadata") + +block_count = hparams["num_hidden_layers"] + +gguf_writer.add_name(last_dir) +gguf_writer.add_context_length(hparams["max_position_embeddings"]) +gguf_writer.add_embedding_length(hparams["hidden_size"]) +gguf_writer.add_block_count(block_count) +gguf_writer.add_feed_forward_length(hparams["intermediate_size"]) +gguf_writer.add_rope_dimension_count(int(hparams["rotary_pct"]*(hparams["hidden_size"]//hparams["num_attention_heads"]))) +gguf_writer.add_head_count(hparams["num_attention_heads"]) +gguf_writer.add_parallel_residual(hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True) +gguf_writer.add_layer_norm_eps(hparams["layer_norm_eps"]) + +# TOKENIZATION + +print("gguf: get tokenizer metadata") + +tokens: List[str] = [] +merges: List[str] = [] + + +if Path(dir_model + "/tokenizer.json").is_file(): + # gpt2 tokenizer + gguf_writer.add_tokenizer_model("gpt2") + + print("gguf: get gpt2 tokenizer merges") + + with open(dir_model + "/tokenizer.json", "r", encoding="utf-8") as f: + tokenizer_json = json.load(f) + merges = tokenizer_json["model"]["merges"] + + gguf_writer.add_token_merges(merges) + + print("gguf: get gpt2 tokenizer vocab") + + vocab_size = len(tokenizer_json["model"]["vocab"]) + + # ref: https://github.com/cmp-nct/ggllm.cpp/blob/master/falcon_convert.py + tokenizer = AutoTokenizer.from_pretrained(dir_model) + + reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()} + byte_encoder = bytes_to_unicode() + byte_decoder = {v: k for k, v in byte_encoder.items()} + + for i in range(vocab_size): + if i in reverse_vocab: + try: + text = bytearray([byte_decoder[c] for c in reverse_vocab[i]]) + except KeyError: + text = bytearray() + for c in reverse_vocab[i]: + if ord(c) < 256: # single byte character + text.append(byte_decoder[ord(c)]) + else: # multibyte special 
token character + text.extend(c.encode('utf-8')) + else: + print(f"Key {i} not in tokenizer vocabulary. Padding with an arbitrary token.") + pad_token = f"[PAD{i}]".encode("utf8") + text = bytearray(pad_token) + + tokens.append(text) + + gguf_writer.add_token_list(tokens) + + if "added_tokens" in tokenizer_json and Path(dir_model + "/tokenizer_config.json").is_file(): + print("gguf: get special token ids") + + with open(dir_model + "/tokenizer_config.json", "r", encoding="utf-8") as f: + tokenizer_config = json.load(f) + + # find special token ids + + if "bos_token" in tokenizer_config: + for key in tokenizer_json["added_tokens"]: + if key["content"] == tokenizer_config["bos_token"]: + gguf_writer.add_bos_token_id(key["id"]) + + if "eos_token" in tokenizer_config: + for key in tokenizer_json["added_tokens"]: + if key["content"] == tokenizer_config["eos_token"]: + gguf_writer.add_eos_token_id(key["id"]) + + if "unk_token" in tokenizer_config: + for key in tokenizer_json["added_tokens"]: + if key["content"] == tokenizer_config["unk_token"]: + gguf_writer.add_unk_token_id(key["id"]) + + if "sep_token" in tokenizer_config: + for key in tokenizer_json["added_tokens"]: + if key["content"] == tokenizer_config["sep_token"]: + gguf_writer.add_sep_token_id(key["id"]) + + if "pad_token" in tokenizer_config: + for key in tokenizer_json["added_tokens"]: + if key["content"] == tokenizer_config["pad_token"]: + gguf_writer.add_pad_token_id(key["id"]) + + +# TENSORS + +tensor_map = gguf.get_tensor_name_map(ARCH,block_count) + +# tensor info +print("gguf: get tensor metadata") + +if num_parts == 0: + part_names = ("pytorch_model.bin",) +else: + part_names = ( + f"pytorch_model-{n:05}-of-{num_parts:05}.bin" for n in range(1, num_parts + 1) + ) + +for part_name in part_names: + print("gguf: loading model part '" + part_name + "'") + model_part = torch.load(f"{dir_model}/{part_name}", map_location="cpu") + + for name in model_part.keys(): + data = model_part[name] + + # we don't need these + if name.endswith(".attention.masked_bias") or name.endswith(".attention.bias") or name.endswith(".attention.rotary_emb.inv_freq"): + continue + + old_dtype = data.dtype + + # convert any unsupported data types to float32 + if data.dtype != torch.float16 and data.dtype != torch.float32: + data = data.to(torch.float32) + + data = data.squeeze().numpy() + + # map tensor names + if name.endswith(".weight") and name[:-7] in tensor_map: + name = tensor_map[name[:-7]] + ".weight" + elif name.endswith(".bias") and name[:-5] in tensor_map: + name = tensor_map[name[:-5]] + ".bias" + else: + print("Can not map tensor '" + name + "'") + sys.exit() + + n_dims = len(data.shape) + data_dtype = data.dtype + + # if f32 desired, convert any float16 to float32 + if ftype == 0 and data_dtype == np.float16: + data = data.astype(np.float32) + + # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 + if ftype == 1 and data_dtype == np.float16 and n_dims == 1: + data = data.astype(np.float32) + + # if f16 desired, convert any float32 2-dim weight tensors to float16 + if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: + data = data.astype(np.float16) + + print(name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype)) + + gguf_writer.add_tensor(name, data) + + +print("gguf: write header") +gguf_writer.write_header_to_file() +print("gguf: write metadata") +gguf_writer.write_kv_data_to_file() +print("gguf: write tensors") +gguf_writer.write_tensors_to_file() + +gguf_writer.close() + +print("gguf: model successfully exported to '" + fname_out + "'") +print("") diff --git a/convert-llama-7b-pth-to-gguf.py b/convert-llama-7b-pth-to-gguf.py new file mode 100644 index 000000000..ab5c80b69 --- /dev/null +++ b/convert-llama-7b-pth-to-gguf.py @@ -0,0 +1,307 @@ +# 7b pth llama --> gguf conversion +# Only models with a single datafile are supported, like 7B +# HF files required in the model dir: config.json tokenizer_config.json tokenizer.json tokenizer.model + +import gguf +import os +import sys +import struct +import json +import numpy as np +import torch + +from typing import Any, List +from pathlib import Path +from sentencepiece import SentencePieceProcessor + +#NDArray = np.ndarray[Any, Any] +# compatible with python < 3.9 +NDArray: 'TypeAlias' = 'np.ndarray[Any, Any]' + + +def count_model_parts(dir_model: str) -> int: + num_parts = 0 + for filename in os.listdir(dir_model): + if filename.startswith("consolidated."): + num_parts += 1 + + if num_parts > 0: + print("gguf: found " + str(num_parts) + " model parts") + return num_parts + + +if len(sys.argv) < 3: + print("Usage: convert-h5-to-ggml.py dir-model ftype\n") + print(" ftype == 0 -> float32") + print(" ftype == 1 -> float16") + + sys.exit(1) + + +# output in the same directory as the model +dir_model = sys.argv[1] +last_dir = os.path.basename(os.path.normpath(dir_model)) + + +# possible tensor data types +# ftype == 0 -> float32 +# ftype == 1 -> float16 + +# map from ftype to string +ftype_str = ["f32", "f16"] + +ftype = 1 +if len(sys.argv) > 2: + ftype = int(sys.argv[2]) + if ftype < 0 or ftype > 1: + print("Invalid ftype: " + str(ftype)) + + sys.exit(1) + +fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".gguf" + +print("gguf: loading model "+last_dir) + +with open(dir_model + "/config.json", "r", encoding="utf-8") as f: + hparams = json.load(f) + +if hparams["architectures"][0] != "LlamaForCausalLM": + print("Model architecture not supported: " + hparams["architectures"][0]) + sys.exit() + +# get number of model parts +num_parts = count_model_parts(dir_model) + +if num_parts > 1: + print("gguf: Only models with a single datafile are supported.") + + sys.exit() + +ARCH=gguf.MODEL_ARCH.LLAMA +gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH]) + + +print("gguf: get model metadata") + +block_count = hparams["num_hidden_layers"] +head_count = hparams["num_attention_heads"] + +if "num_key_value_heads" in hparams: + head_count_kv = hparams["num_key_value_heads"] +else: + head_count_kv = head_count + +if "_name_or_path" in hparams: + hf_repo = hparams["_name_or_path"] +else: + hf_repo = "" + +if "max_sequence_length" in hparams: + ctx_length = hparams["max_sequence_length"] +elif "max_position_embeddings" in hparams: + ctx_length = hparams["max_position_embeddings"] +else: + print("gguf: 
cannot find ctx length parameter.")
+
+    sys.exit()
+
+
+gguf_writer.add_name(last_dir)
+gguf_writer.add_source_hf_repo(hf_repo)
+gguf_writer.add_tensor_data_layout("Meta AI original pth")
+gguf_writer.add_context_length(ctx_length)
+gguf_writer.add_embedding_length(hparams["hidden_size"])
+gguf_writer.add_block_count(block_count)
+gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
+gguf_writer.add_rope_dimension_count(hparams["hidden_size"] // hparams["num_attention_heads"])
+gguf_writer.add_head_count(head_count)
+gguf_writer.add_head_count_kv(head_count_kv)
+gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
+
+if "rope_scaling" in hparams and hparams["rope_scaling"] != None and "factor" in hparams["rope_scaling"]:
+    if "type" in hparams["rope_scaling"]:
+        if hparams["rope_scaling"]["type"] == "linear":
+            gguf_writer.add_rope_scale_linear(hparams["rope_scaling"]["factor"])
+
+
+# TOKENIZATION
+
+print("gguf: get tokenizer metadata")
+
+tokens: List[bytes] = []
+scores: List[float] = []
+toktypes: List[int] = []
+
+if Path(dir_model + "/tokenizer.model").is_file():
+    # vocab type sentencepiece
+    print("gguf: get sentencepiece tokenizer vocab and scores")
+
+    tokenizer = SentencePieceProcessor(dir_model + "/tokenizer.model")
+
+    for i in range(tokenizer.vocab_size()):
+        text: bytes
+        score: float
+
+        piece = tokenizer.id_to_piece(i)
+        text = piece.encode("utf-8")
+        score = tokenizer.get_score(i)
+
+        toktype = 1 # default to normal token type
+        if tokenizer.is_unknown(i):
+            toktype = 2
+        if tokenizer.is_control(i):
+            toktype = 3
+
+        # toktype = 4 is user-defined = tokens from added_tokens.json
+
+        if tokenizer.is_unused(i):
+            toktype = 5
+        if tokenizer.is_byte(i):
+            toktype = 6
+
+        tokens.append(text)
+        scores.append(score)
+        toktypes.append(toktype)
+
+    if Path(dir_model + "/added_tokens.json").is_file():
+        with open(dir_model + "/added_tokens.json", "r", encoding="utf-8") as f:
+            addtokens_json = json.load(f)
+
+            print("gguf: get added tokens")
+
+            for key in addtokens_json:
+                tokens.append( key.encode("utf-8") )
+                scores.append(-1000.0)
+                toktypes.append(4) # user-defined token type
+
+    gguf_writer.add_tokenizer_model("llama")
+    gguf_writer.add_token_list(tokens)
+    gguf_writer.add_token_scores(scores)
+    gguf_writer.add_token_types(toktypes)
+
+
+print("gguf: get special token ids")
+
+if Path(dir_model + "/tokenizer.json").is_file():
+    # Look for special tokens in tokenizer.json if it exists
+
+    with open(dir_model + "/tokenizer.json", "r", encoding="utf-8") as f:
+        tokenizer = json.load(f)
+
+    if "added_tokens" in tokenizer and Path(dir_model + "/tokenizer_config.json").is_file():
+
+        with open(dir_model + "/tokenizer_config.json", "r", encoding="utf-8") as f:
+            tokenizer_config = json.load(f)
+
+        if "bos_token" in tokenizer_config and tokenizer_config["bos_token"] != None:
+            for key in tokenizer["added_tokens"]:
+                if key["content"] == tokenizer_config["bos_token"]["content"]:
+                    gguf_writer.add_bos_token_id(key["id"])
+
+        if "eos_token" in tokenizer_config and tokenizer_config["eos_token"] != None:
+            for key in tokenizer["added_tokens"]:
+                if key["content"] == tokenizer_config["eos_token"]["content"]:
+                    gguf_writer.add_eos_token_id(key["id"])
+
+        if "unk_token" in tokenizer_config and tokenizer_config["unk_token"] != None:
+            for key in tokenizer["added_tokens"]:
+                if key["content"] == tokenizer_config["unk_token"]["content"]:
+                    gguf_writer.add_unk_token_id(key["id"])
+
+        if "sep_token" in tokenizer_config and tokenizer_config["sep_token"] != None:
+            for key in 
tokenizer["added_tokens"]: + if key["content"] == tokenizer_config["sep_token"]["content"]: + gguf_writer.add_sep_token_id(key["id"]) + + if "pad_token" in tokenizer_config and tokenizer_config["pad_token"] != None: + for key in tokenizer["added_tokens"]: + if key["content"] == tokenizer_config["pad_token"]["content"]: + gguf_writer.add_pad_token_id(key["id"]) +else: + # If no tokenizer.json: Look for special tokens in config.json + + if "bos_token_id" in hparams and hparams["bos_token_id"] != None: + gguf_writer.add_bos_token_id(hparams["bos_token_id"]) + + if "eos_token_id" in hparams and hparams["eos_token_id"] != None: + gguf_writer.add_eos_token_id(hparams["eos_token_id"]) + + if "unk_token_id" in hparams and hparams["unk_token_id"] != None: + gguf_writer.add_unk_token_id(hparams["unk_token_id"]) + + if "sep_token_id" in hparams and hparams["sep_token_id"] != None: + gguf_writer.add_sep_token_id(hparams["sep_token_id"]) + + if "pad_token_id" in hparams and hparams["pad_token_id"] != None: + gguf_writer.add_pad_token_id(hparams["pad_token_id"]) + + +# TENSORS + +tensor_map = gguf.get_tensor_name_map(ARCH,block_count) + +# tensor info +print("gguf: get tensor metadata") + +part_names = (f"consolidated.{n:02}.pth" for n in range(0, num_parts)) + +for part_name in part_names: + print("gguf: loading model part '" + part_name + "'") + model_part = torch.load(f"{dir_model}/{part_name}", map_location="cpu") + + for name in model_part.keys(): + data = model_part[name] + + # we don't need these + if name == "rope.freqs": + continue + + old_dtype = data.dtype + + # convert any unsupported data types to float32 + if data.dtype != torch.float16 and data.dtype != torch.float32: + data = data.to(torch.float32) + + data = data.squeeze().numpy() + + # map tensor names + if name.endswith(".weight") and name[:-7] in tensor_map: + name = tensor_map[name[:-7]] + ".weight" + elif name.endswith(".bias") and name[:-5] in tensor_map: + name = tensor_map[name[:-5]] + ".bias" + else: + print("Can not map tensor '" + name + "'") + sys.exit() + + n_dims = len(data.shape) + data_dtype = data.dtype + + # if f32 desired, convert any float16 to float32 + if ftype == 0 and data_dtype == np.float16: + data = data.astype(np.float32) + + # TODO: Why cant we use these float16 as-is? 
There should be no reason to store float16 as float32
+        if ftype == 1 and data_dtype == np.float16 and n_dims == 1:
+            data = data.astype(np.float32)
+
+        # if f16 desired, convert any float32 2-dim weight tensors to float16
+        if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
+            data = data.astype(np.float16)
+
+        print(name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype))
+
+        gguf_writer.add_tensor(name, data)
+
+
+print("gguf: write header")
+gguf_writer.write_header_to_file()
+print("gguf: write metadata")
+gguf_writer.write_kv_data_to_file()
+print("gguf: write tensors")
+gguf_writer.write_tensors_to_file()
+
+gguf_writer.close()
+
+
+print("gguf: model successfully exported to '" + fname_out + "'")
+print("")
diff --git a/convert-llama-ggmlv3-to-gguf.py b/convert-llama-ggmlv3-to-gguf.py
new file mode 100644
index 000000000..30038072f
--- /dev/null
+++ b/convert-llama-ggmlv3-to-gguf.py
@@ -0,0 +1,334 @@
+import sys, struct, math, argparse
+from pathlib import Path
+
+import numpy as np
+
+import gguf
+
+# Note: Does not support GGML_QKK_64
+QK_K = 256
+# Items here are (block size, type size)
+GGML_QUANT_SIZES = {
+    gguf.GGMLQuantizationType.F32  : (1, 4),
+    gguf.GGMLQuantizationType.F16  : (1, 2),
+    gguf.GGMLQuantizationType.Q4_0 : (32, 2 + 16),
+    gguf.GGMLQuantizationType.Q4_1 : (32, 2 + 2 + 16),
+    gguf.GGMLQuantizationType.Q5_0 : (32, 2 + 4 + 16),
+    gguf.GGMLQuantizationType.Q5_1 : (32, 2 + 2 + 4 + 16),
+    gguf.GGMLQuantizationType.Q8_0 : (32, 2 + 32),
+    gguf.GGMLQuantizationType.Q8_1 : (32, 4 + 4 + 32),
+    gguf.GGMLQuantizationType.Q2_K : (256, 2 + 2 + QK_K // 16 + QK_K // 4),
+    gguf.GGMLQuantizationType.Q3_K : (256, 2 + QK_K // 4 + QK_K // 8 + 12),
+    gguf.GGMLQuantizationType.Q4_K : (256, 2 + 2 + QK_K // 2 + 12),
+    gguf.GGMLQuantizationType.Q5_K : (256, 2 + 2 + QK_K // 2 + QK_K // 8 + 12),
+    gguf.GGMLQuantizationType.Q6_K : (256, 2 + QK_K // 2 + QK_K // 4 + QK_K // 16),
+    gguf.GGMLQuantizationType.Q8_K : (256, 4 + QK_K + QK_K // 8),
+}
+
+class Hyperparameters:
+    def __init__(self):
+        self.n_vocab = self.n_embd = self.n_mult = self.n_head = self.n_layer = self.n_rot = self.ftype = 0
+        self.n_ff = 0
+
+    def set_n_ff(self, model):
+        ff_tensor_idx = model.tensor_map.get(b'layers.0.feed_forward.w1.weight')
+        assert ff_tensor_idx is not None, 'Missing layer 0 FF tensor'
+        ff_tensor = model.tensors[ff_tensor_idx]
+        self.n_ff = ff_tensor.dims[1]
+
+    def load(self, data, offset):
+        (
+            self.n_vocab,
+            self.n_embd,
+            self.n_mult,
+            self.n_head,
+            self.n_layer,
+            self.n_rot,
+            self.ftype,
+        ) = struct.unpack('<7I', data[offset:offset + (4 * 7)])
+        return 4 * 7
+
+    def __str__(self):
+        return f'<Hyperparameters: n_vocab={self.n_vocab}, n_embd={self.n_embd}, n_mult={self.n_mult}, n_head={self.n_head}, n_layer={self.n_layer}, n_rot={self.n_rot}, n_ff={self.n_ff}, ftype={self.ftype}>'
+
+class Vocab:
+    def __init__(self):
+        self.items = []
+
+    def load(self, data, offset, n_vocab):
+        orig_offset = offset
+        for _ in range(n_vocab):
+            itemlen = struct.unpack('<I', data[offset:offset + 4])[0]
+            assert itemlen < 4096, 'Absurd vocab item length'
+            offset += 4
+            vocab = bytes(data[offset:offset + itemlen])
+            offset += itemlen
+            score = struct.unpack('<f', data[offset:offset + 4])[0]
+            offset += 4
+            self.items.append((vocab, score))
+        return offset - orig_offset
+
+class Tensor:
+    def __init__(self):
+        self.name = None
+        self.dims = ()
+        self.dtype = None
+        self.start_offset = 0
+        self.len_bytes = 0
+
+    def load(self, data, offset):
+        orig_offset = offset
+        (n_dims, name_len, dtype) = struct.unpack('<3I', data[offset:offset + 12])
+        assert n_dims >= 0 and n_dims <= 4, f'Invalid tensor dimensions {n_dims}'
+        assert name_len < 4096, 'Absurd tensor name length'
+        quant = GGML_QUANT_SIZES.get(dtype)
+        assert quant is not None, 'Unknown tensor type'
+        (blksize, tysize) = quant
+        offset += 12
+        self.dtype = dtype
+        self.dims = struct.unpack(f'<{n_dims}I', data[offset:offset + (4 * n_dims)])
+        offset += 4 * n_dims
+        self.name = bytes(data[offset:offset + name_len])
+        offset += name_len
+        pad = ((offset + 31) & ~31) - offset
+        offset += pad
+        n_elems = np.prod(self.dims)
+        n_bytes = (n_elems * tysize) // blksize
+        self.start_offset = offset
+        self.len_bytes = n_bytes
+        offset += n_bytes
+        # print(n_dims, name_len,
dtype, self.dims, self.name, pad) + return offset - orig_offset + +class GGMLV3Model: + def __init__(self): + self.hyperparameters = None + self.vocab = None + self.tensor_map = {} + self.tensors = [] + + def validate_header(self, data, offset): + if bytes(data[offset:offset + 4]) != b'tjgg' or struct.unpack(' 0: + gguf_writer.add_token_types(toktypes) + return + print(f'* Adding {hp.n_vocab} vocab item(s)') + for (tokid, (vbytes, vscore)) in enumerate(self.model.vocab.items): + tt = 1 # Normal + if len(vbytes) == 0: + tt = 3 # Control + elif tokid >= 3 and tokid <= 258 and len(vbytes) == 1: + hv = hex(vbytes[0])[2:].upper() + vbytes = bytes(f'<0x{hv}>', encoding = 'UTF-8') + tt = 6 # Byte + else: + vbytes = vbytes.replace(b' ', b'\xe2\x96\x81') + toktypes.append(tt) + tokens.append(vbytes) + scores.append(vscore) + gguf_writer.add_token_list(tokens) + gguf_writer.add_token_scores(scores) + gguf_writer.add_token_types(toktypes) + + def add_tensors(self, gguf_writer): + nm = self.name_map + data = self.data + print(f'* Adding {len(self.model.tensors)} tensor(s)') + for tensor in self.model.tensors: + name = str(tensor.name, 'UTF-8') + if name.endswith('.weight'): + name = name[:-7] + suffix = '.weight' + elif name.endswith('.bias'): + name = name[:-5] + suffix = '.bias' + mapped_name = nm.get(name) + assert mapped_name is not None, f'Bad name {name}' + mapped_name += suffix + tempdims = list(tensor.dims[:]) + if len(tempdims) > 1: + temp = tempdims[1] + tempdims[1] = tempdims[0] + tempdims[0] = temp + # print(f'+ {tensor.name} | {mapped_name} {tensor.dims} :: {tempdims}') + gguf_writer.add_tensor(mapped_name, data[tensor.start_offset:tensor.start_offset + tensor.len_bytes], raw_shape = tempdims, raw_dtype = tensor.dtype) + +def handle_metadata(cfg, hp): + import convert + assert cfg.model_metadata_dir.is_dir(), 'Metadata dir is not a directory' + hf_config_path = cfg.model_metadata_dir / "config.json" + orig_config_path = cfg.model_metadata_dir / "params.json" + # We pass a fake model here. "original" mode will check the shapes of some + # tensors if information is missing in the .json file: other than that, the + # model data isn't used so this should be safe (at least for now). 
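+    # (The two LazyTensor shells below only need a valid .shape:
+    # convert.Params.loadOriginalParamsJson reads model["tok_embeddings.weight"].shape[0]
+    # to recover n_vocab and model["layers.0.feed_forward.w1.weight"].shape[0] to recover
+    # n_ff when those values are missing from the JSON; nothing else touches them.)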
+ fakemodel = { + 'tok_embeddings.weight': convert.LazyTensor.__new__(convert.LazyTensor), + 'layers.0.feed_forward.w1.weight': convert.LazyTensor.__new__(convert.LazyTensor), + } + fakemodel['tok_embeddings.weight'].shape = [hp.n_vocab] + fakemodel['layers.0.feed_forward.w1.weight'].shape = [hp.n_ff] + if hf_config_path.exists(): + params = convert.Params.loadHFTransformerJson(fakemodel, hf_config_path) + elif orig_config_path.exists(): + params = convert.Params.loadOriginalParamsJson(fakemodel, orig_config_path) + else: + raise ValueError('Unable to load metadata') + vocab = convert.load_vocab(cfg.vocab_dir if cfg.vocab_dir is not None else cfg.model_metadata_dir, cfg.vocabtype) + convert.check_vocab_size(params, vocab) + return (params, vocab) + +def handle_args(): + parser = argparse.ArgumentParser(description = 'Convert GGMLv3 models to GGUF') + parser.add_argument('--input', '-i', type = Path, help = 'Input GGMLv3 filename') + parser.add_argument('--output', '-o', type = Path, help ='Output GGUF filename') + parser.add_argument('--name', help = 'Set model name') + parser.add_argument('--desc', help = 'Set model description') + parser.add_argument('--gqa', type = int, default = 1, help = 'grouped-query attention factor (use 8 for LLaMA2 70B)') + parser.add_argument('--eps', default = '5.0e-06', help = 'RMS norm eps: Use 1e-6 for LLaMA1 and OpenLLaMA, use 1e-5 for LLaMA2') + parser.add_argument('--context-length', '-c', type=int, default = 2048, help = 'Default max context length: LLaMA1 is typically 2048, LLaMA2 is typically 4096') + parser.add_argument('--model-metadata-dir', '-m', type = Path, help ='Load HuggingFace/.pth vocab and metadata from the specified directory') + parser.add_argument("--vocab-dir", type=Path, help="directory containing tokenizer.model, if separate from model file - only meaningful with --model-metadata-dir") + parser.add_argument("--vocabtype", choices=["spm", "bpe"], help="vocab format - only meaningful with --model-metadata-dir and/or --vocab-dir (default: spm)", default="spm") + return parser.parse_args() + +def main(): + cfg = handle_args() + print(f'* Using config: {cfg}') + print('\n=== WARNING === Be aware that this conversion script is best-effort. Use a native GGUF model if possible. === WARNING ===\n') + data = np.memmap(cfg.input, mode = 'r') + model = GGMLV3Model() + print('* Scanning GGML input file') + offset = model.load(data, 0) + print(f'* GGML model hyperparameters: {model.hyperparameters}') + vocab_override = None + params_override = None + if cfg.model_metadata_dir is not None: + (params_override, vocab_override) = handle_metadata(cfg, model.hyperparameters) + print('!! Note: When overriding params the --gqa, --eps and --context-length options are ignored.') + print(f'* Overriding params: {params_override}') + print(f'* Overriding vocab: {vocab_override}') + else: + print('\n=== WARNING === Special tokens may not be converted correctly. Use --model-metadata-dir if possible === WARNING ===\n') + converter = GGMLToGGUF(model, data, cfg, params_override = params_override, vocab_override = vocab_override) + converter.save() + print(f'* Successful completion. 
Output saved to: {cfg.output}') + +main() diff --git a/convert-llama-hf-to-gguf.py b/convert-llama-hf-to-gguf.py new file mode 100644 index 000000000..f8cfdaa80 --- /dev/null +++ b/convert-llama-hf-to-gguf.py @@ -0,0 +1,327 @@ +# HF llama --> gguf conversion + +import gguf +import os +import sys +import struct +import json +import numpy as np +import torch + +from typing import Any, List, Optional +from pathlib import Path +from sentencepiece import SentencePieceProcessor + +#NDArray = np.ndarray[Any, Any] +# compatible with python < 3.9 +NDArray: 'TypeAlias' = 'np.ndarray[Any, Any]' + +# reverse HF permute back to original pth layout +# https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py + + +def reverse_hf_permute(weights: NDArray, n_head: int, n_kv_head: Optional[int] = None) -> NDArray: + if n_kv_head is not None and n_head != n_kv_head: + n_head //= n_kv_head + + return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:]) + .swapaxes(1, 2) + .reshape(weights.shape)) + + +def count_model_parts(dir_model: str) -> int: + num_parts = 0 + + for filename in os.listdir(dir_model): + if filename.startswith("pytorch_model-"): + num_parts += 1 + + if num_parts > 0: + print("gguf: found " + str(num_parts) + " model parts") + + return num_parts + + +if len(sys.argv) < 3: + print("Usage: convert-h5-to-ggml.py dir-model ftype\n") + print(" ftype == 0 -> float32") + print(" ftype == 1 -> float16") + + sys.exit(1) + + +# output in the same directory as the model +dir_model = sys.argv[1] +last_dir = os.path.basename(os.path.normpath(dir_model)) + + +# possible tensor data types +# ftype == 0 -> float32 +# ftype == 1 -> float16 + + +# map from ftype to string +ftype_str = ["f32", "f16"] + +ftype = 1 +if len(sys.argv) > 2: + ftype = int(sys.argv[2]) + if ftype < 0 or ftype > 1: + print("Invalid ftype: " + str(ftype)) + + sys.exit(1) + +fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".gguf" + +print("gguf: loading model "+last_dir) + +with open(dir_model + "/config.json", "r", encoding="utf-8") as f: + hparams = json.load(f) + +if hparams["architectures"][0] != "LlamaForCausalLM": + print("Model architecture not supported: " + hparams["architectures"][0]) + + sys.exit() + +# get number of model parts +num_parts = count_model_parts(dir_model) + +ARCH=gguf.MODEL_ARCH.LLAMA +gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH]) + +print("gguf: get model metadata") + +block_count = hparams["num_hidden_layers"] +head_count = hparams["num_attention_heads"] + +if "num_key_value_heads" in hparams: + head_count_kv = hparams["num_key_value_heads"] +else: + head_count_kv = head_count + +if "_name_or_path" in hparams: + hf_repo = hparams["_name_or_path"] +else: + hf_repo = "" + +if "max_sequence_length" in hparams: + ctx_length = hparams["max_sequence_length"] +elif "max_position_embeddings" in hparams: + ctx_length = hparams["max_position_embeddings"] +else: + print("gguf: can not find ctx length parameter.") + + sys.exit() + + +gguf_writer.add_name(last_dir) +gguf_writer.add_source_hf_repo(hf_repo) +gguf_writer.add_tensor_data_layout("Meta AI original pth") +gguf_writer.add_context_length(ctx_length) +gguf_writer.add_embedding_length(hparams["hidden_size"]) +gguf_writer.add_block_count(block_count) +gguf_writer.add_feed_forward_length(hparams["intermediate_size"]) +gguf_writer.add_rope_dimension_count(hparams["hidden_size"] // hparams["num_attention_heads"]) 
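+# note: for LLaMA the RoPE dimension count equals the per-head size
+# (hidden_size / num_attention_heads, e.g. 4096 / 32 = 128 for 7B), and
+# head_count_kv < head_count indicates grouped-query attention
+# (LLaMA-2 70B uses 8 KV heads); otherwise the two counts match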
+gguf_writer.add_head_count(head_count)
+gguf_writer.add_head_count_kv(head_count_kv)
+gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
+
+if "rope_scaling" in hparams and hparams["rope_scaling"] != None and "factor" in hparams["rope_scaling"]:
+    if "type" in hparams["rope_scaling"]:
+        if hparams["rope_scaling"]["type"] == "linear":
+            gguf_writer.add_rope_scale_linear(hparams["rope_scaling"]["factor"])
+
+
+# TOKENIZATION
+
+print("gguf: get tokenizer metadata")
+
+tokens: List[bytes] = []
+scores: List[float] = []
+toktypes: List[int] = []
+
+if Path(dir_model + "/tokenizer.model").is_file():
+    # vocab type sentencepiece
+    print("gguf: get sentencepiece tokenizer vocab, scores and token types")
+
+    tokenizer = SentencePieceProcessor(dir_model + "/tokenizer.model")
+
+    for i in range(tokenizer.vocab_size()):
+        text: bytes
+        score: float
+
+        piece = tokenizer.id_to_piece(i)
+        text = piece.encode("utf-8")
+        score = tokenizer.get_score(i)
+
+        toktype = 1 # default to normal token type
+        if tokenizer.is_unknown(i):
+            toktype = 2
+        if tokenizer.is_control(i):
+            toktype = 3
+
+        # toktype = 4 is user-defined = tokens from added_tokens.json
+
+        if tokenizer.is_unused(i):
+            toktype = 5
+        if tokenizer.is_byte(i):
+            toktype = 6
+
+        tokens.append(text)
+        scores.append(score)
+        toktypes.append(toktype)
+
+    if Path(dir_model + "/added_tokens.json").is_file():
+        with open(dir_model + "/added_tokens.json", "r", encoding="utf-8") as f:
+            addtokens_json = json.load(f)
+
+            print("gguf: get added tokens")
+
+            for key in addtokens_json:
+                tokens.append( key.encode("utf-8") )
+                scores.append(-1000.0)
+                toktypes.append(4) # user-defined token type
+
+
+    gguf_writer.add_tokenizer_model("llama")
+    gguf_writer.add_token_list(tokens)
+    gguf_writer.add_token_scores(scores)
+    gguf_writer.add_token_types(toktypes)
+
+
+print("gguf: get special token ids")
+
+if Path(dir_model + "/tokenizer.json").is_file():
+    # Look for special tokens in tokenizer.json if it exists
+
+    with open(dir_model + "/tokenizer.json", "r", encoding="utf-8") as f:
+        tokenizer = json.load(f)
+
+    if "added_tokens" in tokenizer and Path(dir_model + "/tokenizer_config.json").is_file():
+
+        with open(dir_model + "/tokenizer_config.json", "r", encoding="utf-8") as f:
+            tokenizer_config = json.load(f)
+
+        if "bos_token" in tokenizer_config and tokenizer_config["bos_token"] != None:
+            for key in tokenizer["added_tokens"]:
+                if key["content"] == tokenizer_config["bos_token"]["content"]:
+                    gguf_writer.add_bos_token_id(key["id"])
+
+        if "eos_token" in tokenizer_config and tokenizer_config["eos_token"] != None:
+            for key in tokenizer["added_tokens"]:
+                if key["content"] == tokenizer_config["eos_token"]["content"]:
+                    gguf_writer.add_eos_token_id(key["id"])
+
+        if "unk_token" in tokenizer_config and tokenizer_config["unk_token"] != None:
+            for key in tokenizer["added_tokens"]:
+                if key["content"] == tokenizer_config["unk_token"]["content"]:
+                    gguf_writer.add_unk_token_id(key["id"])
+
+        if "sep_token" in tokenizer_config and tokenizer_config["sep_token"] != None:
+            for key in tokenizer["added_tokens"]:
+                if key["content"] == tokenizer_config["sep_token"]["content"]:
+                    gguf_writer.add_sep_token_id(key["id"])
+
+        if "pad_token" in tokenizer_config and tokenizer_config["pad_token"] != None:
+            for key in tokenizer["added_tokens"]:
+                if key["content"] == tokenizer_config["pad_token"]["content"]:
+                    gguf_writer.add_pad_token_id(key["id"])
+else:
+    # If no tokenizer.json: Look for special tokens in config.json
+
+    if "bos_token_id" in hparams and 
hparams["bos_token_id"] != None: + gguf_writer.add_bos_token_id(hparams["bos_token_id"]) + + if "eos_token_id" in hparams and hparams["eos_token_id"] != None: + gguf_writer.add_eos_token_id(hparams["eos_token_id"]) + + if "unk_token_id" in hparams and hparams["unk_token_id"] != None: + gguf_writer.add_unk_token_id(hparams["unk_token_id"]) + + if "sep_token_id" in hparams and hparams["sep_token_id"] != None: + gguf_writer.add_sep_token_id(hparams["sep_token_id"]) + + if "pad_token_id" in hparams and hparams["pad_token_id"] != None: + gguf_writer.add_pad_token_id(hparams["pad_token_id"]) + + +# TENSORS + +tensor_map = gguf.get_tensor_name_map(ARCH,block_count) + +# tensor info +print("gguf: get tensor metadata") + +if num_parts == 0: + part_names = ("pytorch_model.bin",) +else: + part_names = ( + f"pytorch_model-{n:05}-of-{num_parts:05}.bin" for n in range(1, num_parts + 1) + ) + +for part_name in part_names: + print("gguf: loading model part '" + part_name + "'") + model_part = torch.load(f"{dir_model}/{part_name}", map_location="cpu") + + for name in model_part.keys(): + data = model_part[name] + + # we don't need these + if name.endswith(".rotary_emb.inv_freq"): + continue + + old_dtype = data.dtype + + # convert any unsupported data types to float32 + if data.dtype != torch.float16 and data.dtype != torch.float32: + data = data.to(torch.float32) + + data = data.squeeze().numpy() + + # reverse permute these + if name.endswith(".q_proj.weight"): + data = reverse_hf_permute(data, head_count) + if name.endswith(".k_proj.weight"): + data = reverse_hf_permute(data, head_count, head_count_kv) + + # map tensor names + if name.endswith(".weight") and name[:-7] in tensor_map: + name = tensor_map[name[:-7]] + ".weight" + elif name.endswith(".bias") and name[:-5] in tensor_map: + name = tensor_map[name[:-5]] + ".bias" + else: + print("Can not map tensor '" + name + "'") + sys.exit() + + n_dims = len(data.shape) + data_dtype = data.dtype + + # if f32 desired, convert any float16 to float32 + if ftype == 0 and data_dtype == np.float16: + data = data.astype(np.float32) + + # TODO: Why cant we use these float16 as-is? 
There should be not reason to store float16 as float32 + if ftype == 1 and data_dtype == np.float16 and n_dims == 1: + data = data.astype(np.float32) + + # if f16 desired, convert any float32 2-dim weight tensors to float16 + if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2: + data = data.astype(np.float16) + + print(name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype)) + + gguf_writer.add_tensor(name, data) + + +print("gguf: write header") +gguf_writer.write_header_to_file() +print("gguf: write metadata") +gguf_writer.write_kv_data_to_file() +print("gguf: write tensors") +gguf_writer.write_tensors_to_file() + +gguf_writer.close() + + +print("gguf: model successfully exported to '" + fname_out + "'") +print("") diff --git a/convert.py b/convert.py index f3bf17980..c29c032cd 100644 --- a/convert.py +++ b/convert.py @@ -1,4 +1,6 @@ #!/usr/bin/env python + +import gguf import argparse import concurrent.futures import copy @@ -16,13 +18,12 @@ import signal import struct import sys import zipfile +import numpy as np + from abc import ABCMeta, abstractmethod from dataclasses import dataclass from pathlib import Path -from typing import (IO, TYPE_CHECKING, Any, Callable, Dict, Iterable, List, - Literal, Optional, Sequence, Tuple, TypeVar, Union) - -import numpy as np +from typing import (IO, TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Literal, Optional, Sequence, Tuple, TypeVar, Union) from sentencepiece import SentencePieceProcessor # type: ignore if TYPE_CHECKING: @@ -33,57 +34,44 @@ if hasattr(faulthandler, 'register') and hasattr(signal, 'SIGUSR1'): NDArray: 'TypeAlias' = 'np.ndarray[Any, Any]' +ARCH=gguf.MODEL_ARCH.LLAMA +NAMES=gguf.MODEL_TENSOR_NAMES[ARCH] + +# +# data types +# @dataclass(frozen=True) class UnquantizedDataType: name: str - -DT_F16 = UnquantizedDataType('F16') -DT_F32 = UnquantizedDataType('F32') -DT_I32 = UnquantizedDataType('I32') +DT_F16 = UnquantizedDataType('F16') +DT_F32 = UnquantizedDataType('F32') +DT_I32 = UnquantizedDataType('I32') DT_BF16 = UnquantizedDataType('BF16') - -@dataclass(frozen=True) -class QuantizedDataType: - groupsize: int - have_addends: bool - have_g_idx: bool - - -DT_Q4_0 = QuantizedDataType(groupsize=32, have_addends=False, have_g_idx=False) -DT_Q4_1 = QuantizedDataType(groupsize=32, have_addends=True, have_g_idx=False) - -DataType = Union[UnquantizedDataType, QuantizedDataType] - -DATA_TYPE_TO_FTYPE: Dict[DataType, int] = { - DT_F32: 0, - DT_F16: 1, - DT_Q4_0: 2, - DT_Q4_1: 3, -} - -FTYPE_TO_DATA_TYPE: Dict[int, DataType] = \ - {ftype: dtype for (dtype, ftype) in DATA_TYPE_TO_FTYPE.items()} +DataType = Union[UnquantizedDataType] DATA_TYPE_TO_NUMPY: Dict[DataType, 'np.dtype[Any]'] = { DT_BF16: np.dtype(np.uint16), - DT_F16: np.dtype(np.float16), - DT_F32: np.dtype(np.float32), - DT_I32: np.dtype(np.int32), + DT_F16: np.dtype(np.float16), + DT_F32: np.dtype(np.float32), + DT_I32: np.dtype(np.int32), } NUMPY_TYPE_TO_DATA_TYPE: Dict['np.dtype[Any]', DataType] = \ {dtype: data_type for (data_type, dtype) in DATA_TYPE_TO_NUMPY.items()} +SAFETENSORS_DATA_TYPES: Dict[str, DataType] = { + 'BF16': DT_BF16, + 'F16': DT_F16, + 'F32': DT_F32, + 'I32': DT_I32, +} class GGMLFileType(enum.Enum): - AllF32 = 0 + AllF32 = 0 MostlyF16 = 1 # except 1d tensors - MostlyQ4_0 = 2 # except 1d tensors - MostlyQ4_1 = 3 # except 1d tensors - PerLayerIsQ4_1 = 4 # but tok_embeddings.weight and output.weight are F16 def type_for_tensor(self, name: str, tensor: 'LazyTensor') -> DataType: if 
len(tensor.shape) == 1: @@ -93,60 +81,34 @@ class GGMLFileType(enum.Enum): return DT_F32 elif self == GGMLFileType.MostlyF16: return DT_F16 - elif self == GGMLFileType.MostlyQ4_0: - return DT_Q4_0 - elif self == GGMLFileType.MostlyQ4_1: - return DT_Q4_1 - elif self == GGMLFileType.PerLayerIsQ4_1: - if name in ('output.weight', 'tok_embeddings.weight'): - return DT_F16 - else: - return DT_Q4_1 else: raise ValueError(self) -def make_tensors_list() -> List[str]: - ret = [ - 'tok_embeddings.weight', - 'norm.weight', - 'output.weight', - ] - for i in range(80): # maximum number of layer - ret += [ - f'layers.{i}.attention.wq.weight', - f'layers.{i}.attention.wk.weight', - f'layers.{i}.attention.wv.weight', - f'layers.{i}.attention.wo.weight', - f'layers.{i}.attention_norm.weight', - f'layers.{i}.feed_forward.w1.weight', - f'layers.{i}.feed_forward.w2.weight', - f'layers.{i}.feed_forward.w3.weight', - f'layers.{i}.ffn_norm.weight', - ] - return ret - - -TENSORS_LIST = make_tensors_list() -TENSORS_SET = set(TENSORS_LIST) - - -def find_n_mult(n_ff: int, n_embd: int) -> int: - # hardcoded magic range - for n_mult in range(8192, 1, -1): - calc_ff = (((8*n_embd) // 3 + n_mult - 1) // n_mult)*n_mult - if calc_ff == n_ff: - return n_mult - raise Exception(f"failed to find n_mult for (n_ff={n_ff}, n_embd={n_embd}).") +# +# hparams loading +# @dataclass class Params: - n_vocab: int - n_embd: int - n_mult: int - n_head: int - n_layer: int - n_kv_head: Optional[int] # This parameter is only used for Llama 2 + n_vocab: int + n_embd: int + n_mult: int + n_layer: int + n_ctx: int + n_ff: int + n_head: int + n_head_kv: int + f_norm_eps: float + + @staticmethod + def find_n_mult(n_ff: int, n_embd: int) -> int: + # hardcoded magic range + for n_mult in range(8192, 1, -1): + calc_ff = (((8*n_embd) // 3 + n_mult - 1) // n_mult)*n_mult + if calc_ff == n_ff: + return n_mult + raise Exception(f"failed to find n_mult for (n_ff={n_ff}, n_embd={n_embd}).") @staticmethod def guessed(model: 'LazyModel') -> 'Params': @@ -165,37 +127,57 @@ class Params: raise Exception("failed to guess 'n_layer'. 
This model is unknown or unsupported.\n" "Suggestion: provide 'config.json' of the model in the same directory containing model files.") - n_head=n_embd // 128 # guessed + n_head = n_embd // 128 # guessed + n_mult = 256 # guessed + + # TODO: verify this + n_ff = int(2 * (4 * n_embd) / 3) + n_ff = n_mult * ((n_ff + n_mult - 1) // n_mult) return Params( - n_vocab = n_vocab, - n_embd = n_embd, - n_mult = 256, - n_head = n_head, - n_layer = n_layer, - n_kv_head = None, + n_vocab = n_vocab, + n_embd = n_embd, + n_mult = n_mult, + n_layer = n_layer, + n_ctx = -1, + n_ff = n_ff, + n_head = n_head, + n_head_kv = n_head, + f_norm_eps = 1e-5, ) @staticmethod def loadHFTransformerJson(model: 'LazyModel', config_path: 'Path') -> 'Params': config = json.load(open(config_path)) - n_vocab = config["vocab_size"]; - n_embd = config["hidden_size"]; - n_head = config["num_attention_heads"]; - n_layer = config["num_hidden_layers"]; - n_ff = config["intermediate_size"]; - n_kv_head = config.get("num_key_value_heads") + n_vocab = config["vocab_size"] + n_embd = config["hidden_size"] + n_layer = config["num_hidden_layers"] + n_ff = config["intermediate_size"] + n_head = config["num_attention_heads"] + n_head_kv = config["num_key_value_heads"] if "num_key_value_heads" in config else n_head + f_norm_eps = config["rms_norm_eps"] - n_mult = find_n_mult(n_ff, n_embd); + n_mult = Params.find_n_mult(n_ff, n_embd) + + if "max_sequence_length" in config: + n_ctx = config["max_sequence_length"] + elif "max_position_embeddings" in config: + n_ctx = config["max_position_embeddings"] + else: + raise Exception("failed to guess 'n_ctx'. This model is unknown or unsupported.\n" + "Suggestion: provide 'config.json' of the model in the same directory containing model files.") return Params( - n_vocab = n_vocab, - n_embd = n_embd, - n_mult = n_mult, - n_head = n_head, - n_layer = n_layer, - n_kv_head = n_kv_head, + n_vocab = n_vocab, + n_embd = n_embd, + n_mult = n_mult, + n_layer = n_layer, + n_ctx = n_ctx, + n_ff = n_ff, + n_head = n_head, + n_head_kv = n_head_kv, + f_norm_eps = f_norm_eps, ) # LLaMA v2 70B params.json @@ -204,22 +186,32 @@ class Params: def loadOriginalParamsJson(model: 'LazyModel', config_path: 'Path') -> 'Params': config = json.load(open(config_path)) - n_vocab = config["vocab_size"]; - n_embd = config["dim"]; - n_head = config["n_heads"]; - n_layer = config["n_layers"]; - n_mult = config["multiple_of"]; + n_vocab = config["vocab_size"] + n_embd = config["dim"] + n_layer = config["n_layers"] + n_mult = config["multiple_of"] + n_ctx = 2048 if config["norm_eps"] == 1e-06 else 4096 # hack to determine LLaMA v1 vs v2 + n_ff = -1 + n_head = config["n_heads"] + n_head_kv = config["n_kv_heads"] if "n_kv_heads" in config else n_head + f_norm_eps = config["norm_eps"] if n_vocab == -1: n_vocab = model["tok_embeddings.weight"].shape[0] + if n_ff == -1: + n_ff = model["layers.0.feed_forward.w1.weight"].shape[0] + return Params( - n_vocab = n_vocab, - n_embd = n_embd, - n_mult = n_mult, - n_head = n_head, - n_layer = n_layer, - n_kv_head = None, + n_vocab = n_vocab, + n_embd = n_embd, + n_mult = n_mult, + n_layer = n_layer, + n_ctx = n_ctx, + n_ff = n_ff, + n_head = n_head, + n_head_kv = n_head_kv, + f_norm_eps = f_norm_eps, ) @staticmethod @@ -234,30 +226,73 @@ class Params: else: params = Params.guessed(model_plus.model) - print(f'params: n_vocab:{params.n_vocab} n_embd:{params.n_embd} n_mult:{params.n_mult} n_head:{params.n_head} n_layer:{params.n_layer}') return params -class SentencePieceVocab: - def __init__(self, 
fname_tokenizer: Path, fname_added_tokens: Optional[Path], vocabtype: Optional[str]) -> None: - self.vocabtype = vocabtype - if self.vocabtype == "bpe": - self.sentencepiece_tokenizer = json.loads(open(str(fname_tokenizer)).read()) - else: - self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer)) +# +# vocab +# + +class BpeVocab: + def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path]) -> None: + self.bpe_tokenizer = json.loads(open(str(fname_tokenizer), encoding="utf-8").read()) added_tokens: Dict[str, int] if fname_added_tokens is not None: - added_tokens = json.load(open(fname_added_tokens)) + added_tokens = json.load(open(fname_added_tokens, encoding="utf-8")) else: added_tokens = {} - if self.vocabtype == "bpe": - vocab_size: int = len(self.sentencepiece_tokenizer) - else: - vocab_size: int = self.sentencepiece_tokenizer.vocab_size() - expected_ids = list(range(vocab_size, vocab_size + len(added_tokens))) - actual_ids = sorted(added_tokens.values()) + + vocab_size: int = len(self.bpe_tokenizer) + expected_ids = list(range(vocab_size, vocab_size + len(added_tokens))) + actual_ids = sorted(added_tokens.values()) if expected_ids != actual_ids: raise Exception(f"Expected added token IDs to be sequential and start at {len(added_tokens)}; got {actual_ids}") + + items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1]) + self.added_tokens_list = [text for (text, idx) in items] + self.vocab_size_base: int = vocab_size + self.vocab_size: int = self.vocab_size_base + len(self.added_tokens_list) + self.fname_tokenizer = fname_tokenizer + self.fname_added_tokens = fname_added_tokens + + def bpe_tokens(self) -> Iterable[Tuple[bytes, float, gguf.TokenType]]: + tokenizer = self.bpe_tokenizer + from transformers.models.gpt2 import tokenization_gpt2 + byte_encoder = tokenization_gpt2.bytes_to_unicode() + byte_decoder = {v: k for k, v in byte_encoder.items()} + for i, item in enumerate(tokenizer): + text: bytes = item.encode("utf-8") + score: float = -i + yield text, score, gguf.TokenType.USER_DEFINED + + def added_tokens(self) -> Iterable[Tuple[bytes, float, gguf.TokenType]]: + for text in self.added_tokens_list: + score = -1000.0 + yield text.encode("utf-8"), score, gguf.TokenType.USER_DEFINED + + def all_tokens(self) -> Iterable[Tuple[bytes, float, gguf.TokenType]]: + yield from self.bpe_tokens() + yield from self.added_tokens() + + def __repr__(self) -> str: + return f"BpeVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>" + + +class SentencePieceVocab: + def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path]) -> None: + self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer)) + added_tokens: Dict[str, int] + if fname_added_tokens is not None: + added_tokens = json.load(open(fname_added_tokens, encoding="utf-8")) + else: + added_tokens = {} + + vocab_size: int = self.sentencepiece_tokenizer.vocab_size() + expected_ids = list(range(vocab_size, vocab_size + len(added_tokens))) + actual_ids = sorted(added_tokens.values()) + if expected_ids != actual_ids: + raise Exception(f"Expected added token IDs to be sequential and start at {len(added_tokens)}; got {actual_ids}") + items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1]) self.added_tokens_list = [text for (text, idx) in items] self.vocab_size_base: int = vocab_size @@ -265,117 +300,66 @@ class SentencePieceVocab: self.fname_tokenizer = fname_tokenizer self.fname_added_tokens = fname_added_tokens 
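Both vocab classes deliberately expose the same `all_tokens()` iteration contract, yielding `(bytes, float, gguf.TokenType)` triples, so downstream writer code does not care which implementation it received. A minimal consumer sketch under that assumption (the `dump_vocab` helper and the example paths are illustrative, not part of this patch):

```python
from pathlib import Path

def dump_vocab(vocab: "Vocab") -> None:
    # Works for BpeVocab and SentencePieceVocab alike: all_tokens() chains the
    # base tokenizer entries with any entries from added_tokens.json.
    for text, score, toktype in vocab.all_tokens():
        print(text.decode("utf-8", errors="replace"), score, toktype)

# e.g. (hypothetical model directory):
# vocab = SentencePieceVocab(Path("models/7B/tokenizer.model"),
#                            Path("models/7B/added_tokens.json"))
# dump_vocab(vocab)
```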
-    def sentencepiece_tokens(self) -> Iterable[Tuple[bytes, float]]:
+    def sentencepiece_tokens(self) -> Iterable[Tuple[bytes, float, gguf.TokenType]]:
         tokenizer = self.sentencepiece_tokenizer
-        if self.vocabtype == "bpe":
-            from transformers.models.gpt2 import tokenization_gpt2
-            byte_encoder = tokenization_gpt2.bytes_to_unicode()
-            byte_decoder = {v: k for k, v in byte_encoder.items()}
-            for i, item in enumerate(tokenizer):
-                text: bytes
-                text = b''.join([x.to_bytes(1, byteorder='big') for x in [byte_decoder[y] for y in item]])
-                score: float = -i
-                yield text, score
-        else:
-            for i in range(tokenizer.vocab_size()):
-                text: bytes
-                if tokenizer.is_unknown(i):
-                    text = " \u2047 ".encode("utf-8")
-                elif tokenizer.is_control(i):
-                    text = b""
-                elif tokenizer.is_byte(i):
-                    piece = tokenizer.id_to_piece(i)
-                    if len(piece) != 6:
-                        raise Exception(f"Invalid token: {piece}")
-                    byte_value = int(piece[3:-1], 16)
-                    text = struct.pack("B", byte_value)
-                else:
-                    text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8")
-                score: float = tokenizer.get_score(i)
-                yield text, score
+        for i in range(tokenizer.vocab_size()):
+            piece = tokenizer.id_to_piece(i)
+            text: bytes = piece.encode("utf-8")
+            score: float = tokenizer.get_score(i)
-    def added_tokens(self) -> Iterable[Tuple[bytes, float]]:
+            toktype = gguf.TokenType.NORMAL
+            if tokenizer.is_unknown(i):
+                toktype = gguf.TokenType.UNKNOWN
+            if tokenizer.is_control(i):
+                toktype = gguf.TokenType.CONTROL
+
+            # NOTE: I think added_tokens are user defined.
+            # ref: https://github.com/google/sentencepiece/blob/master/src/sentencepiece_model.proto
+            # if tokenizer.is_user_defined(i): toktype = gguf.TokenType.USER_DEFINED
+
+            if tokenizer.is_unused(i):
+                toktype = gguf.TokenType.UNUSED
+            if tokenizer.is_byte(i):
+                toktype = gguf.TokenType.BYTE
+
+            yield text, score, toktype
+
+    def added_tokens(self) -> Iterable[Tuple[bytes, float, gguf.TokenType]]:
         for text in self.added_tokens_list:
             score = -1000.0
-            yield text.encode("utf-8"), score
+            yield text.encode("utf-8"), score, gguf.TokenType.USER_DEFINED

-    def all_tokens(self) -> Iterable[Tuple[bytes, float]]:
+    def all_tokens(self) -> Iterable[Tuple[bytes, float, gguf.TokenType]]:
         yield from self.sentencepiece_tokens()
         yield from self.added_tokens()

     def __repr__(self) -> str:
         return f"<SentencePieceVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"

-class GGMLVocab:
-    def __init__(self, tokens: List[Tuple[bytes, float]]):
-        self.tokens = tokens
-        self.vocab_size = len(tokens)
-
-    def all_tokens(self) -> Iterable[Tuple[bytes, float]]:
-        return self.tokens
-
-    def __repr__(self) -> str:
-        return f"<GGMLVocab with {self.vocab_size} tokens>"
+Vocab = Union[BpeVocab, SentencePieceVocab]
-Vocab = Union[SentencePieceVocab, GGMLVocab]
+
+#
+# data loading
+# TODO: reuse (probably move to gguf.py?)
+# - -def permute(weights: NDArray, n_head: int, n_kv_head: Optional[int] = None) -> NDArray: - if n_kv_head is not None and n_head != n_kv_head: - n_head //= n_kv_head +def permute(weights: NDArray, n_head: int, n_head_kv: int) -> NDArray: + #print( "permute debug " + str(weights.shape[0]) + " x " + str(weights.shape[1]) + " nhead " + str(n_head) + " nheadkv " + str(n_kv_head) ) + if n_head_kv is not None and n_head != n_head_kv: + n_head //= n_head_kv return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:]) .swapaxes(1, 2) .reshape(weights.shape)) -def dequantize_q4(qvalues_pack32: NDArray, scales: NDArray, addends: Optional[NDArray], g_idx: Optional[NDArray]) -> NDArray: - # First reinterpret each row from a list of int32s containing 8 values each - # to a list of uint8s containing 2 values each. - qvalues_pack8 = qvalues_pack32.view(np.uint8) - - # Then split out the two values per int8 (which requires an actual - # conversion because numpy doesn't natively support int4s). - qvalues = np.zeros([qvalues_pack8.shape[0], qvalues_pack8.shape[1] * 2], dtype=np.uint8) - qvalues[:, 0::2] = qvalues_pack8 & 0xf - qvalues[:, 1::2] = qvalues_pack8 >> 4 - - assert addends is None or addends.shape == scales.shape - assert qvalues.shape[0] == scales.shape[0] - assert qvalues.shape[1] % scales.shape[1] == 0 - if g_idx is None: - repeat_count = qvalues.shape[1] // scales.shape[1] - scales = scales[:, :, np.newaxis] - if addends is not None: - addends = addends[:, :, np.newaxis] - # Reshape so that the below computation broadcasts over scales and addends: - qvalues.shape = (qvalues.shape[0], scales.shape[1], int(repeat_count)) - else: - # In this case the scale and addend is selected for each column by g_idx: - assert addends is not None - scales = scales[:, g_idx] - addends = addends[:, g_idx] - if addends is None: - # Q4_0 - qvalues = qvalues.view(np.int8) - qvalues -= 8 - # And do the actual 'value = scale * qvalue + addend' computation. - values = scales * qvalues - if addends is not None: - values += addends - if g_idx is None: - values.shape = (values.shape[0], values.shape[1] * values.shape[2]) - return values - - class Tensor(metaclass=ABCMeta): data_type: DataType @abstractmethod def astype(self, data_type: DataType) -> 'Tensor': ... @abstractmethod - def permute(self, n_head: int, n_kv_head: Optional[int] = None) -> 'Tensor': ... + def permute(self, n_head: int, n_head_kv: int) -> 'Tensor': ... @abstractmethod def permute_part(self, n_part: int, n_head: int) -> 'UnquantizedTensor': ... 
@abstractmethod @@ -413,8 +397,8 @@ class UnquantizedTensor(Tensor): r = self.ndarray.shape[0] // 3 return UnquantizedTensor(self.ndarray[r * n_part : r * n_part + r, ...]) - def permute(self, n_head: int, n_kv_head: Optional[int] = None) -> 'UnquantizedTensor': - return UnquantizedTensor(permute(self.ndarray, n_head, n_kv_head)) + def permute(self, n_head: int, n_head_kv: int) -> 'UnquantizedTensor': + return UnquantizedTensor(permute(self.ndarray, n_head, n_head_kv)) def load_unquantized(lazy_tensor: 'LazyTensor', expected_dtype: Any = None, convert: bool = False) -> NDArray: @@ -433,183 +417,25 @@ def load_unquantized(lazy_tensor: 'LazyTensor', expected_dtype: Any = None, conv return tensor.ndarray -class GGMLQuantizedTensor(Tensor): - data_type: QuantizedDataType - - def __init__(self, ndarray: NDArray, shape: List[int], data_type: DataType) -> None: - rows, columns = shape - assert data_type in (DT_Q4_1, DT_Q4_0) # for now - assert isinstance(data_type, QuantizedDataType) # redundant, but mypy complains without this - assert columns % data_type.groupsize == 0 - words_in_block = 6 if data_type == DT_Q4_1 else 5 - self.ndarray = ndarray.view(dtype=np.uint32).reshape((rows, columns // data_type.groupsize, words_in_block)) - self.shape = shape[:] - self.data_type = data_type - - def astype(self, data_type: DataType) -> Tensor: - if data_type == self.data_type: - return self - scales = self.ndarray[:, :, 0].view(np.float32) - if self.data_type.have_addends: - addends = self.ndarray[:, :, 1].view(np.float32) - else: - addends = None - qweights = self.ndarray[:, :, -4:].reshape([self.shape[0], self.shape[1] // 8]) - - dq = dequantize_q4(qweights, scales, addends, g_idx=None) - return UnquantizedTensor(dq).astype(data_type) - - def to_ggml(self) -> 'GGMLQuantizedTensor': - return self - - def permute(self, n_head: int, n_kv_head: Optional[int] = None) -> 'GGMLQuantizedTensor': - return GGMLQuantizedTensor(permute(self.ndarray, n_head, n_kv_head), self.shape, self.data_type) - - def permute_part(self, n_part: int, n_head: int) -> 'UnquantizedTensor': - r = self.ndarray.shape[0] // 3 - return UnquantizedTensor(permute(self.ndarray[r * n_part : r * n_part + r, ...], n_head)) - - def part(self, n_part: int) -> 'UnquantizedTensor': - r = self.ndarray.shape[0] // 3 - return UnquantizedTensor(self.ndarray[r * n_part : r * n_part + r, ...]) - -GGMLCompatibleTensor = Union[UnquantizedTensor, GGMLQuantizedTensor] +GGMLCompatibleTensor = Union[UnquantizedTensor] class DeferredPermutedTensor(Tensor): - def __init__(self, base: Tensor, n_head: int, n_kv_head: Optional[int] = None) -> None: + def __init__(self, base: Tensor, n_head: int, n_head_kv: int) -> None: self.base = base self.n_head = n_head - self.n_kv_head = n_kv_head self.data_type = self.base.data_type def astype(self, data_type: DataType) -> Tensor: - return self.base.astype(data_type).permute(self.n_head, self.n_kv_head) + return self.base.astype(data_type).permute(self.n_head, self.n_head_kv) def to_ggml(self) -> GGMLCompatibleTensor: - return self.base.to_ggml().permute(self.n_head, self.n_kv_head) + return self.base.to_ggml().permute(self.n_head, self.n_head_kv) - def permute(self, n_head: int, n_kv_head: Optional[int] = None) -> Tensor: + def permute(self, n_head: int, n_head_kv: int) -> Tensor: raise Exception("shouldn't permute twice") -class GPTQForLLaMaQuantizedTensor(Tensor): - def __init__(self, model: 'LazyModel', namebase: str) -> None: - qweight = load_unquantized(model[f"{namebase}.qweight"], np.int32) - scales = 
load_unquantized(model[f"{namebase}.scales"], np.float32, convert=True) - - bias = model.get(f"{namebase}.bias") - if bias is not None: - # Q4_1 does not support bias; good thing the bias is always all zeros. - assert not np.any(load_unquantized(bias)) - - if f"{namebase}.zeros" in model: - zeros = load_unquantized(model[f"{namebase}.zeros"], np.float32) - else: - qzeros = load_unquantized(model[f"{namebase}.qzeros"], np.int32) - assert qzeros.dtype == np.int32 - zeros = dequantize_q4(qzeros, scales, scales, g_idx=None) - assert zeros.dtype == np.float32 - - assert zeros.shape == scales.shape - - # Output is transposed compared to the input, and addends have their sign flipped. - # Scales and zeros similarly must be transposed but only for newer - # versions of GPTQ-for-LLaMa; the older versions can be identified by - # having shape (n_embd, 1). - qweight = qweight.T - if scales.shape[1] != 1: - scales = scales.T - zeros = zeros.T - - # Output also has signs flipped for the addends. - self.qweight = qweight - self.scales = scales - self.addends = -zeros - - self.g_idx: Optional[NDArray] - if f"{namebase}.g_idx" in model: - self.g_idx = load_unquantized(model[f"{namebase}.g_idx"], np.int32) - assert self.g_idx.shape == (qweight.shape[1] * 8,) - else: - self.g_idx = None - - self.shape = [self.qweight.shape[0], self.qweight.shape[1] * 8] - self.data_type = QuantizedDataType(groupsize=self.groupsize(), have_addends=True, - have_g_idx=(self.g_idx is not None)) - - def inspect(self, row: int, col: int) -> None: - '''For debugging.''' - qweight = (self.qweight[row, col // 8] >> (4 * (col & 7))) & 0xf - if self.g_idx is not None: - group = self.g_idx[col] - else: - group = int(col // self.groupsize()) - scale = self.scales[row, group] - addend = self.addends[row, group] - with np.printoptions(precision=None, suppress=True): - print(f'scale:{scale} addend:{addend} qweight:{qweight}') - print('possible values:', np.arange(16) * scale + addend) - print('actual value:', qweight * scale + addend) - - def astype(self, data_type: DataType) -> Tensor: - if isinstance(data_type, QuantizedDataType): - assert self.g_idx is None and data_type.have_addends is True and data_type.have_g_idx is False - return self.regroup(data_type.groupsize) - - dequantized = dequantize_q4(np.ascontiguousarray(self.qweight), self.scales, self.addends, self.g_idx) - return UnquantizedTensor(dequantized).astype(data_type) - - def groupsize(self) -> int: - assert self.addends.shape == self.scales.shape - assert self.shape[1] % self.scales.shape[1] == 0 - return self.shape[1] // self.scales.shape[1] - - def regroup(self, new_groupsize: int = 32) -> 'GPTQForLLaMaQuantizedTensor': - # Old versions of GPTQ-for-LLaMa shared scales and addends between all the - # columns in a row. Newer versions share them between every set of N - # columns in a row, where N is the `groupsize` parameter, usually 128. The - # output format shares them between every set of 32 columns. To handle - # this, duplicate scales and addends for every smaller group. - # (In the above, 'row' and 'column' are in the sense of the output.) 
- assert self.g_idx is None - old_groupsize = self.groupsize() - assert old_groupsize >= new_groupsize and old_groupsize % new_groupsize == 0, old_groupsize - ret = copy.copy(self) - ret.addends = self.addends.repeat(old_groupsize // new_groupsize, axis=1) - ret.scales = self.scales.repeat(old_groupsize // new_groupsize, axis=1) - ret.data_type = QuantizedDataType(groupsize=new_groupsize, have_addends=True, have_g_idx=False) - return ret - - def permute(self, n_head: int, n_kv_head: Optional[int] = None) -> Tensor: - return DeferredPermutedTensor(self, n_head, n_kv_head) - - def to_ggml(self) -> GGMLQuantizedTensor: - # The output format looks like this: - # For each row: - # For each group of 32 columns: - # - addend (float32, 4 bytes) - # - scale (float32, 4 bytes) - # - weights (int4 * 32, 16 bytes) - - if self.groupsize() != 32: - raise Exception("should have been regrouped before converting to ggml") - - # Since the output format is mixed between integers and floats, we have - # to hackily view the floats as int32s just so numpy will let us - # concatenate them. - addends_view = self.addends.view(dtype=np.int32)[:, :, np.newaxis] - scales_view = self.scales.view(dtype=np.int32)[:, :, np.newaxis] - - # Split into groups of 4 columns (i.e. 32 columns of quantized data): - grouped = self.qweight.reshape([self.qweight.shape[0], self.qweight.shape[1] // 4, 4]) - - # And concatenate: - grouped = np.concatenate([scales_view, addends_view, grouped], axis=2, casting='no') - - return GGMLQuantizedTensor(grouped, self.shape, DT_Q4_1) - - @dataclass class LazyTensor: _load: Callable[[], Tensor] @@ -632,17 +458,6 @@ class LazyTensor: def validate_conversion_to(self, data_type: DataType) -> None: if data_type == self.data_type: return - if isinstance(data_type, QuantizedDataType): - if not isinstance(self.data_type, QuantizedDataType): - raise Exception(f"Can't turn an unquantized tensor into a quantized type ({data_type})") - if self.data_type.have_g_idx: - sys.stderr.write( - "Error: Input uses the newer GPTQ-for-LLaMa format (using g_idx), " - "which is not yet natively supported by GGML. 
" - "For now you can still convert this model by passing `--outtype f16` to dequantize, " - "but that will result in a much larger output file for no quality benefit.\n") - sys.exit(1) - assert not data_type.have_g_idx and self.data_type.have_addends and data_type.have_addends LazyModel = Dict[str, LazyTensor] @@ -713,10 +528,10 @@ def merge_multifile_models(models_plus: List[ModelPlus]) -> ModelPlus: return ModelPlus(model, paths, format, vocab) -def permute_lazy(lazy_tensor: LazyTensor, n_head: int, n_kv_head: Optional[int] = None) -> LazyTensor: +def permute_lazy(lazy_tensor: LazyTensor, n_head: int, n_head_kv: int) -> LazyTensor: def load() -> Tensor: - return lazy_tensor.load().permute(n_head, n_kv_head) - return LazyTensor(load, lazy_tensor.shape, lazy_tensor.data_type, f'permute({n_head}, {n_kv_head}) ' + lazy_tensor.description) + return lazy_tensor.load().permute(n_head, n_head_kv) + return LazyTensor(load, lazy_tensor.shape, lazy_tensor.data_type, f'permute({n_head}, {n_head_kv}) ' + lazy_tensor.description) def permute_part_lazy(lazy_tensor: LazyTensor, n_part: int, n_head: int) -> LazyTensor: def load() -> Tensor: @@ -732,66 +547,6 @@ def part_lazy(lazy_tensor: LazyTensor, n_part: int) -> LazyTensor: s[0] = s[0] // 3 return LazyTensor(load, s, lazy_tensor.data_type, 'part ' + lazy_tensor.description) -def convert_transformers_to_orig(model: LazyModel, params: Params) -> LazyModel: - out: LazyModel = {} - out["tok_embeddings.weight"] = model["model.embed_tokens.weight"] - out["norm.weight"] = model["model.norm.weight"] - out["output.weight"] = model["lm_head.weight"] - - for i in itertools.count(): - if f"model.layers.{i}.self_attn.q_proj.weight" in model: - out[f"layers.{i}.attention.wq.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.q_proj.weight"], params.n_head) - out[f"layers.{i}.attention.wk.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.k_proj.weight"], params.n_head, params.n_kv_head) - out[f"layers.{i}.attention.wv.weight"] = model[f"model.layers.{i}.self_attn.v_proj.weight"] - elif f"model.layers.{i}.self_attn.W_pack.weight" in model: - out[f"layers.{i}.attention.wq.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 0, params.n_head) - out[f"layers.{i}.attention.wk.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 1, params.n_head) - out[f"layers.{i}.attention.wv.weight"] = part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 2) - else: - break - - out[f"layers.{i}.attention.wo.weight"] = model[f"model.layers.{i}.self_attn.o_proj.weight"] - - out[f"layers.{i}.feed_forward.w1.weight"] = model[f"model.layers.{i}.mlp.gate_proj.weight"] - out[f"layers.{i}.feed_forward.w2.weight"] = model[f"model.layers.{i}.mlp.down_proj.weight"] - out[f"layers.{i}.feed_forward.w3.weight"] = model[f"model.layers.{i}.mlp.up_proj.weight"] - - out[f"layers.{i}.attention_norm.weight"] = model[f"model.layers.{i}.input_layernorm.weight"] - out[f"layers.{i}.ffn_norm.weight"] = model[f"model.layers.{i}.post_attention_layernorm.weight"] - return out - - -def handle_quantization(model: LazyModel) -> LazyModel: - '''Convert a model with entries for 'foo.qweight', 'foo.scales', etc. - (which resolve to UnquantizedTensors with the raw data) to one with entries - for 'foo.weight' (which resolve to QuantizedTensors). 
- ''' - def convert(name: str) -> Tuple[str, LazyTensor]: - if name.endswith(".qweight"): - namebase = name.rsplit('.', 1)[0] - orig_name = namebase + ".weight" - - lazy_tensor = model[name] - assert len(lazy_tensor.shape) == 2 - real_shape = [lazy_tensor.shape[1], lazy_tensor.shape[0] * 8] - - # Calculate type. This replicates the logic in - # GPTQForLLaMaQuantizedTensor (which is executed when the modelis - # actually loaded). - lazy_scales = model[f"{namebase}.scales"] - scales_width = 1 if lazy_scales.shape[1] == 1 else lazy_scales.shape[0] - assert real_shape[1] % scales_width == 0 - groupsize = real_shape[1] // scales_width - have_g_idx = f"{namebase}.g_idx" in model - data_type = QuantizedDataType(groupsize=groupsize, have_addends=True, have_g_idx=have_g_idx) - - def load() -> Tensor: - return GPTQForLLaMaQuantizedTensor(model, namebase) - - return (orig_name, LazyTensor(load, real_shape, data_type, '[quantized]')) - else: - return (name, model[name]) - return dict(convert(name) for name in model) # Functionality that simulates `torch.load` but where individual tensors are # only loaded into memory on demand, not all at once. @@ -885,14 +640,6 @@ def lazy_load_torch_file(outer_fp: IO[bytes], path: Path) -> ModelPlus: return ModelPlus(model=as_dict, paths=[path], format='torch', vocab=None) -SAFETENSORS_DATA_TYPES: Dict[str, DataType] = { - 'BF16': DT_BF16, - 'F16': DT_F16, - 'F32': DT_F32, - 'I32': DT_I32, -} - - def lazy_load_safetensors_file(fp: IO[bytes], path: Path) -> ModelPlus: header_size, = struct.unpack(' bytes: return ret -def lazy_load_ggml_file(fp: io.BufferedReader, path: Path) -> ModelPlus: - magic = must_read(fp, 4)[::-1] - if magic in (b'ggmf', b'ggjt'): - version, = struct.unpack("i", must_read(fp, 4)) - assert version == 1 - else: - assert magic == b'ggml' - version = None - n_vocab, n_embd, n_mult, n_head, n_layer, rot, file_type = struct.unpack('<7i', must_read(fp, 28)) - - tokens: List[Tuple[bytes, float]] = [] - for i in range(n_vocab): - if i == 32000: - # HACK: GPT4All messed with the format without changing the magic - # number. Specifically, they changed the vocab section to contain - # `n_vocab - 1` tokens instead of `n_vocab` (i.e. omitting the - # extra pad token). Try to detect if we're reading a file like - # this. - orig_pos = fp.tell() - fp.seek(20, io.SEEK_CUR) - is_gpt4all = fp.read(21) == b'tok_embeddings.weight' - fp.seek(orig_pos) - if is_gpt4all: - break - - length, = struct.unpack("i", must_read(fp, 4)) - text = must_read(fp, length) - if magic != b'ggml': - score, = struct.unpack("f", must_read(fp, 4)) - tokens.append((text, score)) - vocab = GGMLVocab(tokens) if magic != b'ggml' else None - - model: LazyModel = {} - # Use mmap for the actual data to avoid race conditions with the file offset. 
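The mmap comment just above captures the key idea of the (now deleted) GGML loader: map the file once, let every tensor's `load()` close over a `memoryview` slice of the mapping, and turn the slice into an array with `np.frombuffer`, so nothing is copied and nothing fights over the shared file offset. A stripped-down sketch of the same pattern, assuming plain float32 tensors and made-up file contents, not the removed implementation itself:

```python
import mmap
import struct

import numpy as np

def lazy_views(path: str, offsets_and_shapes):
    # offsets_and_shapes: [(byte_offset, shape), ...] for float32 tensors.
    # One read-only mapping serves all tensors; slices are zero-copy.
    with open(path, "rb") as f:
        mapped = memoryview(mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ))
    views = []
    for off, shape in offsets_and_shapes:
        nbytes = int(np.prod(shape)) * 4
        buf = mapped[off:off + nbytes]
        # Default args pin buf/shape per thunk; frombuffer does not copy.
        views.append(lambda b=buf, s=shape: np.frombuffer(b, dtype=np.float32).reshape(s))
    return views

if __name__ == "__main__":
    with open("demo.bin", "wb") as f:
        f.write(struct.pack("<8f", *range(8)))
    (v,) = lazy_views("demo.bin", [(0, (2, 4))])
    print(v())  # [[0. 1. 2. 3.] [4. 5. 6. 7.]]
```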
- off = fp.raw.tell() - mapped = memoryview(mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ)) - fp.raw.seek(off) # needed on Windows - - def read_tensor() -> None: # this is a function so that variables captured in `load` don't change - shape_len, name_len, ftype = struct.unpack("iii", must_read(fp, 12)) - assert 0 <= shape_len <= 3 - shape: List[int] = list(struct.unpack(f"{shape_len}i", must_read(fp, 4 * shape_len))) - shape = shape[::-1] - name = must_read(fp, name_len).decode('utf-8') - data_type = FTYPE_TO_DATA_TYPE[ftype] - - if magic == b'ggjt': - fp.seek((fp.tell() + 31) & -32) - - if data_type == DT_Q4_1: - # See GPTQForLLaMaQuantizedTensor.ggml_ndarray() - size = 24 * (shape[1] // 32) * shape[0] - elif data_type == DT_Q4_0: - size = 20 * (shape[1] // 32) * shape[0] - else: - numpy_dtype = DATA_TYPE_TO_NUMPY[data_type] - elm_count = math.prod(shape) - size = elm_count * numpy_dtype.itemsize - offset = fp.tell() - buf = mapped[offset:offset+size] - fp.seek(size, io.SEEK_CUR) - - def load() -> Tensor: - if isinstance(data_type, QuantizedDataType): - ndarray = np.frombuffer(buf, dtype=np.uint32) - return GGMLQuantizedTensor(ndarray, shape, data_type) - else: - return UnquantizedTensor(np.frombuffer(buf, dtype=numpy_dtype).reshape(shape)) - description = f'ggml offset={offset} type={data_type} path={path}' - model[name] = LazyTensor(load, shape, data_type, description) - - while fp.read(1) != b'': - fp.seek(-1, io.SEEK_CUR) - read_tensor() - - return ModelPlus(model=model, paths=[path], format='ggml', vocab=vocab) - - @functools.lru_cache(maxsize=None) def lazy_load_file(path: Path) -> ModelPlus: fp = open(path, 'rb') @@ -1010,9 +679,6 @@ def lazy_load_file(path: Path) -> ModelPlus: if first8[:2] == b'PK': # A zip file, i.e. PyTorch format return lazy_load_torch_file(fp, path) - elif first8[2:4] == b'gg': - # GGML format - return lazy_load_ggml_file(fp, path) elif struct.unpack(' ModelPlus: In = TypeVar('In') Out = TypeVar('Out') - def bounded_parallel_map(func: Callable[[In], Out], iterable: Iterable[In], concurrency: int) -> Iterable[Out]: '''Parallel map, but with backpressure. 
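`bounded_parallel_map`, whose docstring begins above and resumes below, exists because converting tensors is CPU-heavy while writing them out is strictly sequential: it farms `func` out to a pool but never runs more than `concurrency` items ahead of the consumer. A self-contained sketch of that backpressure idea, not the converter's exact implementation:

```python
import concurrent.futures
from typing import Callable, Iterable, Iterator, TypeVar

In = TypeVar("In")
Out = TypeVar("Out")

def bounded_map_sketch(func: Callable[[In], Out], items: Iterable[In],
                       concurrency: int) -> Iterator[Out]:
    # Keep at most `concurrency` futures in flight: because this is a
    # generator, submission pauses until the consumer asks for the next item.
    with concurrent.futures.ThreadPoolExecutor(max_workers=concurrency) as ex:
        window = []
        for x in items:
            window.append(ex.submit(func, x))
            if len(window) >= concurrency:
                yield window.pop(0).result()
        for fut in window:
            yield fut.result()

if __name__ == "__main__":
    print(list(bounded_map_sketch(lambda n: n * n, range(6), concurrency=2)))
```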
If the caller doesn't call `next` fast enough, this will stop calling `func` at some point rather than
@@ -1043,8 +708,7 @@ def bounded_parallel_map(func: Callable[[In], Out], iterable: Iterable[In], conc
 def check_vocab_size(params: Params, vocab: Vocab) -> None:
     if params.n_vocab != vocab.vocab_size:
-        # GGMLVocab comes from the same file as the model so shouldn't mismatch:
-        assert isinstance(vocab, SentencePieceVocab)
+        assert isinstance(vocab, BpeVocab) or isinstance(vocab, SentencePieceVocab)
         if params.n_vocab == vocab.vocab_size_base:
             print("Ignoring added_tokens.json since model matches vocab size without it.")
             vocab.added_tokens_list = []
@@ -1061,98 +725,154 @@ def check_vocab_size(params: Params, vocab: Vocab) -> None:
 class OutputFile:
     def __init__(self, fname_out: Path) -> None:
-        self.fout = open(fname_out, "wb")
+        self.gguf = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH])
 
-    def write_file_header(self, params: Params, file_type: GGMLFileType) -> None:
-        self.fout.write(b"ggjt"[::-1])  # magic
-        values = [
-            1,  # file version
-            params.n_vocab,
-            params.n_embd,
-            params.n_mult,
-            params.n_head,
-            params.n_layer,
-            params.n_embd // params.n_head,  # rot (obsolete)
-            file_type.value,
-        ]
-        self.fout.write(struct.pack("i" * len(values), *values))
+    def add_meta_arch(self, params: Params) -> None:
+        self.gguf.add_name                ("LLaMA")
+        self.gguf.add_context_length      (params.n_ctx)
+        self.gguf.add_embedding_length    (params.n_embd)
+        self.gguf.add_block_count         (params.n_layer)
+        self.gguf.add_feed_forward_length (params.n_ff)
+        self.gguf.add_rope_dimension_count(params.n_embd // params.n_head)
+        self.gguf.add_head_count          (params.n_head)
+        self.gguf.add_head_count_kv       (params.n_head_kv)
+        self.gguf.add_layer_norm_rms_eps  (params.f_norm_eps)
 
-    def write_tensor_header(self, name: str, shape: Sequence[int], data_type: DataType) -> None:
-        sname = name.encode('utf-8')
-        self.fout.write(struct.pack("iii", len(shape), len(sname), DATA_TYPE_TO_FTYPE[data_type]))
-        self.fout.write(struct.pack("i" * len(shape), *shape[::-1]))
-        self.fout.write(sname)
-        self.fout.seek((self.fout.tell() + 31) & -32)
+    def add_meta_vocab(self, vocab: Vocab) -> None:
+        tokens = []
+        scores = []
+        toktypes = []
+        # NOTE: `all_tokens` returns the base vocabulary and added tokens
+        # TODO: add special tokens?
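The rewritten `OutputFile` above is a thin wrapper over the `gguf.GGUFWriter` this PR introduces, and the call order matters: key/value metadata is accumulated first, then the header and KV data are flushed, then tensor info, then tensor data. For orientation before the method continues below, here is a minimal vocab-only use of the same writer calls; the file name, the toy values, and the `gguf.MODEL_ARCH.LLAMA` spelling are assumptions for illustration:

```python
import gguf  # the gguf Python package added by this PR

# Architecture metadata, mirroring add_meta_arch() above (toy values).
w = gguf.GGUFWriter("tiny.gguf", gguf.MODEL_ARCH_NAMES[gguf.MODEL_ARCH.LLAMA])
w.add_name("toy")
w.add_context_length(2048)
w.add_embedding_length(64)
w.add_block_count(2)
w.add_feed_forward_length(256)
w.add_rope_dimension_count(64 // 8)
w.add_head_count(8)
w.add_head_count_kv(8)
w.add_layer_norm_rms_eps(1e-5)

# Tokenizer metadata, mirroring add_meta_vocab() above.
w.add_tokenizer_model("llama")
w.add_token_list([b"<unk>", b"<s>", b"</s>"])
w.add_token_scores([0.0, 0.0, 0.0])
w.add_token_types([2, 3, 3])  # assumed: UNKNOWN = 2, CONTROL = 3

# Flush in the same order OutputFile.write_vocab_only() uses below.
w.write_header_to_file()
w.write_kv_data_to_file()
w.close()
```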
+ for text, score, toktype in vocab.all_tokens(): + tokens.append(text) + scores.append(score) + toktypes.append(toktype) - def write_vocab(self, vocab: Vocab) -> None: - for text, score in vocab.all_tokens(): - self.fout.write(struct.pack("i", len(text))) - self.fout.write(text) - self.fout.write(struct.pack("f", score)) + self.gguf.add_tokenizer_model("llama") + self.gguf.add_token_list(tokens) + self.gguf.add_token_scores(scores) + self.gguf.add_token_types(toktypes) + + def add_tensor_info(self, name: str, tensor: LazyTensor) -> None: + n_elements = 1 + for dim in tensor.shape: + n_elements *= dim + data_type = DATA_TYPE_TO_NUMPY[tensor.data_type] + data_nbytes = n_elements * data_type.itemsize + self.gguf.add_tensor_info(name, tensor.shape, data_type, data_nbytes) + + def write_meta(self) -> None: + self.gguf.write_header_to_file() + self.gguf.write_kv_data_to_file() + + def write_tensor_info(self) -> None: + self.gguf.write_ti_data_to_file() + + def close(self) -> None: + self.gguf.close() @staticmethod - def write_vocab_only(fname_out: Path, vocab: Vocab) -> None: - of = OutputFile(fname_out) - params = Params(n_vocab=vocab.vocab_size, n_embd=0, n_mult=0, n_head=1, n_layer=0) - of = OutputFile(fname_out) - of.write_file_header(params, file_type=GGMLFileType.AllF32) - of.write_vocab(vocab) - of.fout.close() - - @staticmethod - def write_all(fname_out: Path, params: Params, file_type: GGMLFileType, model: LazyModel, vocab: Vocab) -> None: + def write_vocab_only(fname_out: Path, params: Params, vocab: Vocab) -> None: check_vocab_size(params, vocab) + of = OutputFile(fname_out) - of.write_file_header(params, file_type) - print("Writing vocab...") - of.write_vocab(vocab) + + # meta data + of.add_meta_arch(params) + of.add_meta_vocab(vocab) + of.write_meta() + + of.close() + + @staticmethod + def write_all(fname_out: Path, params: Params, model: LazyModel, vocab: Vocab) -> None: + check_vocab_size(params, vocab) + + of = OutputFile(fname_out) + + # meta data + of.add_meta_arch(params) + of.add_meta_vocab(vocab) + + # tensor info + for name, lazy_tensor in model.items(): + of.add_tensor_info(name, lazy_tensor) + + of.write_meta() + of.write_tensor_info() def do_item(item: Tuple[str, LazyTensor]) -> NDArray: name, lazy_tensor = item return lazy_tensor.load().to_ggml().ndarray + # tensor data ndarrays = bounded_parallel_map(do_item, model.items(), concurrency=8) for i, ((name, lazy_tensor), ndarray) in enumerate(zip(model.items(), ndarrays)): size = ' x '.join(f"{dim:6d}" for dim in lazy_tensor.shape) padi = len(str(len(model))) print(f"[{i+1:{padi}d}/{len(model)}] Writing tensor {name:38s} | size {size:16} | type {lazy_tensor.data_type}") - of.write_tensor_header(name, lazy_tensor.shape, lazy_tensor.data_type) - ndarray.tofile(of.fout) - of.fout.close() + of.gguf.write_tensor_data(ndarray) + of.close() def pick_output_type(model: LazyModel, output_type_str: Optional[str]) -> GGMLFileType: - wq_type = model["layers.0.attention.wq.weight"].data_type - if output_type_str == "f32" or (output_type_str is None and wq_type in (DT_F32, DT_BF16)): + wq_type = model[NAMES[gguf.MODEL_TENSOR.ATTN_Q].format(bid=0)+".weight"].data_type + + if output_type_str == "f32" or (output_type_str is None and wq_type == DT_F32): return GGMLFileType.AllF32 - if output_type_str == "f16" or (output_type_str is None and wq_type == DT_F16): + if output_type_str == "f16" or (output_type_str is None and wq_type in (DT_F16, DT_BF16)): return GGMLFileType.MostlyF16 - if output_type_str == "q4_1" or (output_type_str is None 
and isinstance(wq_type, QuantizedDataType) and
-                                            wq_type.have_addends):
-        if isinstance(model["output.weight"].data_type, QuantizedDataType):
-            return GGMLFileType.MostlyQ4_1
-        else:
-            return GGMLFileType.PerLayerIsQ4_1
-    if output_type_str == "q4_0" or (output_type_str is None and isinstance(wq_type, QuantizedDataType)):
-        return GGMLFileType.MostlyQ4_0
+    name_to_type = {name: lazy_tensor.data_type for (name, lazy_tensor) in model.items()}
+    raise Exception(f"Unexpected combination of types: {name_to_type}")
-
-def do_necessary_conversions(model: LazyModel, params: Params) -> LazyModel:
-    model = handle_quantization(model)
-
-    if "lm_head.weight" in model:
-        model = convert_transformers_to_orig(model, params)
-    model = filter_and_sort_tensors(model)
-
-    return model
-
-
 def convert_to_output_type(model: LazyModel, output_type: GGMLFileType) -> LazyModel:
     return {name: tensor.astype(output_type.type_for_tensor(name, tensor)) for (name, tensor) in model.items()}
 
+def convert_model_names(model: LazyModel, params: Params) -> LazyModel:
+    tmap = gguf.get_tensor_name_map(ARCH, params.n_layer)
+
+    tmp = model
+
+    # HF models permute or pack some of the tensors, so we need to undo that
+    for i in itertools.count():
+        if f"model.layers.{i}.self_attn.q_proj.weight" in model:
+            print(f"Permuting layer {i}")
+            tmp[f"model.layers.{i}.self_attn.q_proj.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.q_proj.weight"], params.n_head, params.n_head)
+            tmp[f"model.layers.{i}.self_attn.k_proj.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.k_proj.weight"], params.n_head, params.n_head_kv)
+            #tmp[f"model.layers.{i}.self_attn.v_proj.weight"] = model[f"model.layers.{i}.self_attn.v_proj.weight"]
+        elif f"model.layers.{i}.self_attn.W_pack.weight" in model:
+            print(f"Unpacking and permuting layer {i}")
+            tmp[f"model.layers.{i}.self_attn.q_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 0, params.n_head, params.n_head)
+            tmp[f"model.layers.{i}.self_attn.k_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 1, params.n_head, params.n_head_kv)
+            tmp[f"model.layers.{i}.self_attn.v_proj.weight"] = part_lazy        (model[f"model.layers.{i}.self_attn.W_pack.weight"], 2)
+        else:
+            break
+
+    out: LazyModel = {}
+    for name, lazy_tensor in model.items():
+        name_new = name
+
+        if name in tmap:
+            name_new = tmap[name]
+        elif name.endswith(".weight") and name[:-7] in tmap:
+            name_new = tmap[name[:-7]] + ".weight"
+        elif name.endswith(".bias") and name[:-5] in tmap:
+            name_new = tmap[name[:-5]] + ".bias"
+        else:
+            raise Exception(f"Unexpected tensor name: {name}")
+
+        if gguf.should_skip_tensor_TMP(ARCH, params.n_layer, name_new):
+            print(f"skipping tensor {name_new}")
+            continue
+        else:
+            print(f"{name:48s} -> {name_new:40s} | {lazy_tensor.data_type} | {lazy_tensor.shape}")
+            out[name_new] = lazy_tensor
+
+    return out
 
 def nth_multifile_path(path: Path, n: int) -> Optional[Path]:
     '''Given any path belonging to a multi-file model (e.g. foo.bin.1), return
@@ -1203,11 +923,6 @@ def load_some_model(path: Path) -> ModelPlus:
     # Try the PyTorch patterns too, with lower priority
     globs = ["consolidated.00.pth", "pytorch_model-00001-of-*.bin", "*.pt", "pytorch_model.bin"]
     files = [file for glob in globs for file in path.glob(glob)]
-    if not files:
-        # Try GGML too, but with lower priority, since if both a non-GGML
-        # model and a GGML model exist in the same directory, we assume the
-        # latter was converted from the former.
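`convert_model_names` above undoes two Hugging Face checkpoint quirks before renaming anything: fused `W_pack` QKV weights are split apart, and the Q/K projections are re-permuted, with K using `n_head_kv` so that grouped-query models (e.g. LLaMA v2 70B) come out right. The permutation itself is the interleaved-RoPE row reshuffle; a sketch of the kind of transform `permute_lazy` ultimately applies, assuming the classic convert.py formulation:

```python
import numpy as np

def permute_sketch(w: np.ndarray, n_head: int) -> np.ndarray:
    # Regroup rows so the two rotary halves of each head are interleaved
    # the way ggml's RoPE implementation expects.
    return (w.reshape(n_head, 2, w.shape[0] // n_head // 2, *w.shape[1:])
             .swapaxes(1, 2)
             .reshape(w.shape))

if __name__ == "__main__":
    n_embd, n_head = 8, 2
    w = np.arange(n_embd * n_embd, dtype=np.float32).reshape(n_embd, n_embd)
    print(permute_sketch(w, n_head).shape)  # (8, 8): same shape, rows reordered per head
```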
- files = list(path.glob("ggml-model*.bin*")) if not files: raise Exception(f"Can't find model in directory {path}") if len(files) > 1: @@ -1224,19 +939,14 @@ def load_some_model(path: Path) -> ModelPlus: return model_plus -def filter_and_sort_tensors(model: LazyModel) -> LazyModel: - return {name: model[name] for name in TENSORS_LIST if name in model} - - -def load_vocab(path: Path, vocabtype: Optional[str]) -> SentencePieceVocab: - print(f"vocabtype: {vocabtype}") +def load_vocab(path: Path, vocabtype: Optional[str]) -> Union[BpeVocab, SentencePieceVocab]: # Be extra-friendly and accept either a file or a directory. Also, if it's # a directory, it might be the model directory, and tokenizer.model might # be in the parent of that. if path.is_dir(): vocab_file = "tokenizer.model" if vocabtype == 'bpe': - vocab_file = "vocab.json" + vocab_file = "vocab.json" path2 = path / vocab_file # Use `.parent` instead of /.. to handle the symlink case better. path3 = path.parent / vocab_file @@ -1248,21 +958,24 @@ def load_vocab(path: Path, vocabtype: Optional[str]) -> SentencePieceVocab: raise FileNotFoundError( f"Could not find tokenizer.model in {path} or its parent; " "if it's in another directory, pass the directory as --vocab-dir") + + print(f"Loading vocab file '{path}', type '{vocabtype}'") + added_tokens_path = path.parent / "added_tokens.json" - print(f"Loading vocab file {path}") - return SentencePieceVocab(path, added_tokens_path if added_tokens_path.exists() else None, - vocabtype) + if vocabtype == "bpe": + return BpeVocab(path, added_tokens_path if added_tokens_path.exists() else None) + elif vocabtype == "spm": + return SentencePieceVocab(path, added_tokens_path if added_tokens_path.exists() else None) + else: + raise ValueError(f"Unsupported vocabulary type {vocabtype}") def default_outfile(model_paths: List[Path], file_type: GGMLFileType) -> Path: namestr = { - GGMLFileType.AllF32: "f32", + GGMLFileType.AllF32: "f32", GGMLFileType.MostlyF16: "f16", - GGMLFileType.MostlyQ4_0: "q4_0", - GGMLFileType.MostlyQ4_1: "q4_1", - GGMLFileType.PerLayerIsQ4_1: "q4_1", }[file_type] - ret = model_paths[0].parent / f"ggml-model-{namestr}.bin" + ret = model_paths[0].parent / f"ggml-model-{namestr}.gguf" if ret in model_paths: sys.stderr.write( f"Error: Default output path ({ret}) would overwrite the input. 
" @@ -1281,44 +994,59 @@ def do_dump_model(model_plus: ModelPlus) -> None: def main(args_in: Optional[List[str]] = None) -> None: parser = argparse.ArgumentParser(description="Convert a LLaMa model to a GGML compatible file") - parser.add_argument("--dump", action="store_true", help="don't convert, just show what's in the model") - parser.add_argument("--dump-single", action="store_true", help="don't convert, just show what's in a single model file") - parser.add_argument("--vocab-only", action="store_true", help="extract only the vocab") - parser.add_argument("--outtype", choices=["f32", "f16", "q4_1", "q4_0"], help="output format (default: based on input)") - parser.add_argument("--vocab-dir", type=Path, help="directory containing tokenizer.model, if separate from model file") - parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input") - parser.add_argument("model", type=Path, - help="directory containing model file, or model file itself (*.pth, *.pt, *.bin)") - parser.add_argument("--vocabtype", default='spm', choices=["spm", "bpe"], help="vocab format (default: spm)") + parser.add_argument("--dump", action="store_true", help="don't convert, just show what's in the model") + parser.add_argument("--dump-single", action="store_true", help="don't convert, just show what's in a single model file") + parser.add_argument("--vocab-only", action="store_true", help="extract only the vocab") + parser.add_argument("--outtype", choices=["f32", "f16"], help="output format (default: based on input)") + parser.add_argument("--vocab-dir", type=Path, help="directory containing tokenizer.model, if separate from model file") + parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input") + parser.add_argument("model", type=Path, help="directory containing model file, or model file itself (*.pth, *.pt, *.bin)") + parser.add_argument("--vocabtype", choices=["spm", "bpe"], help="vocab format (default: spm)", default="spm") + parser.add_argument("--ctx", type=int, help="model training context (default: based on input)") args = parser.parse_args(args_in) - vocab: Vocab if args.dump_single: model_plus = lazy_load_file(args.model) do_dump_model(model_plus) - elif args.vocab_only: + + model_plus = load_some_model(args.model) + + params = Params.load(model_plus) + if params.n_ctx == -1: + if args.ctx is None: + raise Exception("The model doesn't have a context size, and you didn't specify one with --ctx\n" + "Please specify one with --ctx:\n" + " - LLaMA v1: --ctx 2048\n" + " - LLaMA v2: --ctx 4096\n") + params.n_ctx = args.ctx + + print(f"params = {params}") + + vocab: Vocab + if args.vocab_only: vocab = load_vocab(args.vocab_dir or args.model, args.vocabtype) assert args.outfile, "need --outfile if using --vocab-only" outfile = args.outfile - OutputFile.write_vocab_only(outfile, vocab) + OutputFile.write_vocab_only(outfile, params, vocab) print(f"Wrote {outfile}") else: - model_plus = load_some_model(args.model) if args.dump: do_dump_model(model_plus) return + if model_plus.vocab is not None and args.vocab_dir is None: vocab = model_plus.vocab else: vocab_dir = args.vocab_dir if args.vocab_dir else model_plus.paths[0].parent vocab = load_vocab(vocab_dir, args.vocabtype) - params = Params.load(model_plus) - model = model_plus.model - model = do_necessary_conversions(model, params) + + model = model_plus.model + model = convert_model_names(model, params) output_type = pick_output_type(model, args.outtype) - model = convert_to_output_type(model, 
output_type) - outfile = args.outfile or default_outfile(model_plus.paths, output_type) - OutputFile.write_all(outfile, params, output_type, model, vocab) + model = convert_to_output_type(model, output_type) + outfile = args.outfile or default_outfile(model_plus.paths, output_type) + + OutputFile.write_all(outfile, params, model, vocab) print(f"Wrote {outfile}") diff --git a/docs/token_generation_performance_tips.md b/docs/token_generation_performance_tips.md index 69ba6173c..c9acff7d4 100644 --- a/docs/token_generation_performance_tips.md +++ b/docs/token_generation_performance_tips.md @@ -3,7 +3,7 @@ ## Verifying that the model is running on the GPU with cuBLAS Make sure you compiled llama with the correct env variables according to [this guide](../README.md#cublas), so that llama accepts the `-ngl N` (or `--n-gpu-layers N`) flag. When running llama, you may configure `N` to be very large, and llama will offload the maximum possible number of layers to the GPU, even if it's less than the number you configured. For example: ```shell -./main -m "path/to/model.bin" -ngl 200000 -p "Please sir, may I have some " +./main -m "path/to/model.gguf" -ngl 200000 -p "Please sir, may I have some " ``` When running llama, before it starts the inference work, it will output diagnostic information that shows whether cuBLAS is offloading work to the GPU. Look for these lines: @@ -25,9 +25,9 @@ GPU: A6000 (48GB VRAM) CPU: 7 physical cores RAM: 32GB -Model: `TheBloke_Wizard-Vicuna-30B-Uncensored-GGML/Wizard-Vicuna-30B-Uncensored.ggmlv3.q4_0.bin` (30B parameters, 4bit quantization, GGML) +Model: `TheBloke_Wizard-Vicuna-30B-Uncensored-GGML/Wizard-Vicuna-30B-Uncensored.q4_0.gguf` (30B parameters, 4bit quantization, GGML) -Run command: `./main -m "path/to/model.bin" -p "-p "An extremely detailed description of the 10 best ethnic dishes will follow, with recipes: " -n 1000 [additional benchmark flags]` +Run command: `./main -m "path/to/model.gguf" -p "An extremely detailed description of the 10 best ethnic dishes will follow, with recipes: " -n 1000 [additional benchmark flags]` Result: diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index d53652815..d2176c910 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -6,27 +6,6 @@ find_package(Threads REQUIRED) # ... -# common - -set(TARGET common) - -add_library(${TARGET} OBJECT - common.h - common.cpp - console.h - console.cpp - grammar-parser.h - grammar-parser.cpp - ) - -if (BUILD_SHARED_LIBS) - set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON) -endif() - -target_include_directories(${TARGET} PUBLIC .) 
-target_compile_features(${TARGET} PUBLIC cxx_std_11)
-target_link_libraries(${TARGET} PRIVATE llama)
-
 # examples
 
 include_directories(${CMAKE_CURRENT_SOURCE_DIR})
diff --git a/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp b/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
index 1a238c4dd..469d6e3de 100644
--- a/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
+++ b/examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp
@@ -1,5 +1,6 @@
 #include "ggml.h"
 #include "llama.h"
+
 #include
 #include
 #include
@@ -138,14 +139,16 @@ void print_sample_weights(TransformerWeights *w){
 struct llama_vocab {
     using id    = int32_t;
     using token = std::string;
+    using ttype = llama_token_type;
 
-    struct token_score {
-        token tok;
+    struct token_data {
+        token text;
         float score;
+        ttype type;
     };
 
     std::unordered_map<token, id> token_to_id;
-    std::vector<token_score> id_to_token;
+    std::vector<token_data>  id_to_token;
 };
 
 struct my_llama_hparams {
@@ -502,7 +505,7 @@ bool is_ggml_file(const char *filename) {
         return false;
     }
     uint32_t magic = file.read_u32();
-    return magic == LLAMA_FILE_MAGIC;
+    return magic == GGUF_MAGIC;
 }
 
 void load_vocab(const char *filename, Config *config, struct llama_vocab *vocab) {
@@ -515,36 +518,30 @@ void load_vocab(const char *filename, Config *config, struct llama_vocab *vocab)
         struct llama_model * lmodel = llama_load_model_from_file(filename, llama_params);
         struct llama_context * lctx = llama_new_context_with_model(lmodel, llama_params);
 
-        std::vector<const char *> strings;
-        std::vector<float> scores;
-        int n_vocab = llama_n_vocab(lctx);
-        strings.resize(n_vocab, NULL);
-        scores.resize(n_vocab, 0);
-        n_vocab = llama_get_vocab(lctx, strings.data(), scores.data(), n_vocab);
-        GGML_ASSERT(n_vocab == llama_n_vocab(lctx));
+        const int n_vocab = llama_n_vocab(lctx);
         vocab->id_to_token.resize(n_vocab);
         for (int i=0; i<n_vocab; i++) {
-            std::string tok   = strings[i];
-            float       score = scores[i];
-            vocab->id_to_token[i].tok   = tok;
-            vocab->id_to_token[i].score = score;
-            vocab->token_to_id.emplace(tok, i);
+            vocab->id_to_token[i].text  = llama_token_get_text(lctx, i);
+            vocab->id_to_token[i].score = llama_token_get_score(lctx, i);
+            vocab->id_to_token[i].type  = llama_token_get_type(lctx, i);
+            vocab->token_to_id.emplace(vocab->id_to_token[i].text, i);
        }
         llama_free(lctx);
         llama_free_model(lmodel);
     } else { // assume llama2.c vocabulary
         printf("Assuming llama2.c vocabulary since %s is not a ggml file\n", filename);
         llama_file file(filename, "rb");
-        uint32_t n_vocab = config->vocab_size;
+        const int n_vocab = config->vocab_size;
         /* uint32_t max_token_length = */ file.read_u32(); // unused
         vocab->id_to_token.resize(n_vocab);
-        for (uint32_t i=0; i<n_vocab; i++) {
+        for (int i=0; i<n_vocab; i++) {
             float score = file.read_f32();
             uint32_t len = file.read_u32();
-            std::string tok = file.read_string(len);
-            vocab->id_to_token[i].tok = tok;
+            std::string text = file.read_string(len);
+            vocab->id_to_token[i].text = text;
             vocab->id_to_token[i].score = score;
-            vocab->token_to_id.emplace(tok, i);
+            vocab->id_to_token[i].type = LLAMA_TOKEN_TYPE_UNDEFINED;
+            vocab->token_to_id.emplace(text, i);
         }
     }
 }
@@ -590,75 +587,80 @@ void save_as_llama_model(struct llama_vocab * vocab, struct my_llama_model * mod
     if (file.fp == NULL) {
         return;
     }
-    // write_magic
-    file.write_u32(LLAMA_FILE_MAGIC);   // magic
-    file.write_u32(LLAMA_FILE_VERSION); // version
-    // write_hparams
-    file.write_u32(model->hparams.n_vocab);
-    file.write_u32(model->hparams.n_embd);
-    file.write_u32(model->hparams.n_mult);
-    file.write_u32(model->hparams.n_head);
-    file.write_u32(model->hparams.n_layer);
-    file.write_u32(model->hparams.n_rot);
-    file.write_u32(LLAMA_FTYPE_ALL_F32);
-    // write_vocab - for now we are just writing the existing BPE voc. assuming karpathy's vocabulary is the same. idk.
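One detail in the hunk above deserves a callout: `is_ggml_file` now keys on `GGUF_MAGIC` instead of the old `LLAMA_FILE_MAGIC`, and the GGUF prelude is deliberately trivial to sniff. A sketch assuming the v1 layout this PR defines (4-byte magic `GGUF`, then little-endian uint32 version, tensor count, and KV count):

```python
import struct

GGUF_MAGIC = 0x46554747  # b"GGUF" read as a little-endian uint32

def sniff_gguf(path: str):
    # Reads only the fixed-size prelude; returns None for non-GGUF files.
    with open(path, "rb") as f:
        head = f.read(16)
    if len(head) < 16:
        return None
    magic, version, n_tensors, n_kv = struct.unpack("<4I", head)
    if magic != GGUF_MAGIC:
        return None
    return {"version": version, "n_tensors": n_tensors, "n_kv": n_kv}

if __name__ == "__main__":
    print(sniff_gguf("tiny.gguf"))  # e.g. the file written in the sketch further up
```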
- uint32_t n_vocab = model->hparams.n_vocab; - for (uint32_t i = 0; i < n_vocab; i++) { - const auto & token_score = vocab->id_to_token.at(i); - file.write_u32((uint32_t) token_score.tok.size()); - file.write_raw(token_score.tok.data(), token_score.tok.size()); - file.write_raw(&token_score.score, sizeof(token_score.score)); - } - - // stuff AK weights into GG weights one by one. - // w->token_embedding_table -> model->tok_embeddings - // float* -> struct ggml_tensor - stuff_karpathy_weights_into_gg(model->tok_embeddings, w->token_embedding_table); - stuff_karpathy_weights_into_gg(model->output, w->token_embedding_table); - - stuff_karpathy_weights_into_gg(model->norm, w->rms_final_weight); - //print_row(model->norm, 0); - - // for rms-att-weight - int row_length = model->hparams.n_embd; - const auto & hparams = model->hparams; - //int n_ff = model->hparams.n_embd; - int n_ff = get_n_ff(&hparams); - - for (uint32_t i = 0; i < model->hparams.n_layer; ++i){ - auto & layer = model->layers[i]; - // 1d - stuff_karpathy_weights_into_gg(layer.attention_norm, &w->rms_att_weight[i*row_length]); - stuff_karpathy_weights_into_gg(layer.ffn_norm , &w->rms_ffn_weight[i*row_length]); - - // from 3d matrix layer x dim x dim to 2d matrix dim x dim - stuff_karpathy_weights_into_gg(layer.wq , &w->wq[i*row_length*row_length]); - stuff_karpathy_weights_into_gg(layer.wk , &w->wk[i*row_length*row_length]); - stuff_karpathy_weights_into_gg(layer.wv , &w->wv[i*row_length*row_length]); - stuff_karpathy_weights_into_gg(layer.wo , &w->wo[i*row_length*row_length]); - - stuff_karpathy_weights_into_gg(layer.w1 , &w->w1[i*row_length*n_ff]); - stuff_karpathy_weights_into_gg(layer.w2 , &w->w2[i*n_ff*row_length]); - stuff_karpathy_weights_into_gg(layer.w3 , &w->w3[i*row_length*n_ff]); - } - // write tensors - write_tensor(&file, model->tok_embeddings); - write_tensor(&file, model->norm); - write_tensor(&file, model->output); // ? - for (uint32_t i = 0; i < model->hparams.n_layer; ++i) { - auto & layer = model->layers[i]; - - write_tensor(&file, layer.attention_norm); - write_tensor(&file, layer.wq); - write_tensor(&file, layer.wk); - write_tensor(&file, layer.wv); - write_tensor(&file, layer.wo); - write_tensor(&file, layer.ffn_norm); - write_tensor(&file, layer.w1); - write_tensor(&file, layer.w2); - write_tensor(&file, layer.w3); - } +#pragma message("TODO: implement file saving using gguf") + (void) vocab; + (void) model; + (void) w; +// // write_magic +// file.write_u32(LLAMA_FILE_MAGIC); // magic +// file.write_u32(LLAMA_FILE_VERSION); // version +// // write_hparams +// file.write_u32(model->hparams.n_vocab); +// file.write_u32(model->hparams.n_embd); +// file.write_u32(model->hparams.n_mult); +// file.write_u32(model->hparams.n_head); +// file.write_u32(model->hparams.n_layer); +// file.write_u32(model->hparams.n_rot); +// file.write_u32(LLAMA_FTYPE_ALL_F32); +// +// // write_vocab - for now we are just writing the existing BPE voc. assuming karpathy's vocabulary is the same. idk. +// uint32_t n_vocab = model->hparams.n_vocab; +// for (uint32_t i = 0; i < n_vocab; i++) { +// const auto & token_data = vocab->id_to_token.at(i); +// file.write_u32((uint32_t) token_data.tok.size()); +// file.write_raw(token_data.tok.data(), token_data.tok.size()); +// file.write_raw(&token_data.score, sizeof(token_data.score)); +// } +// +// // stuff AK weights into GG weights one by one. 
+// // w->token_embedding_table -> model->tok_embeddings
+// // float*                   -> struct ggml_tensor
+// stuff_karpathy_weights_into_gg(model->tok_embeddings, w->token_embedding_table);
+// stuff_karpathy_weights_into_gg(model->output, w->token_embedding_table);
+//
+// stuff_karpathy_weights_into_gg(model->norm, w->rms_final_weight);
+// //print_row(model->norm, 0);
+//
+// // for rms-att-weight
+// int row_length = model->hparams.n_embd;
+// const auto & hparams = model->hparams;
+// //int n_ff = model->hparams.n_embd;
+// int n_ff = get_n_ff(&hparams);
+//
+// for (uint32_t i = 0; i < model->hparams.n_layer; ++i){
+//     auto & layer = model->layers[i];
+//     // 1d
+//     stuff_karpathy_weights_into_gg(layer.attention_norm, &w->rms_att_weight[i*row_length]);
+//     stuff_karpathy_weights_into_gg(layer.ffn_norm      , &w->rms_ffn_weight[i*row_length]);
+//
+//     // from 3d matrix layer x dim x dim to 2d matrix dim x dim
+//     stuff_karpathy_weights_into_gg(layer.wq            , &w->wq[i*row_length*row_length]);
+//     stuff_karpathy_weights_into_gg(layer.wk            , &w->wk[i*row_length*row_length]);
+//     stuff_karpathy_weights_into_gg(layer.wv            , &w->wv[i*row_length*row_length]);
+//     stuff_karpathy_weights_into_gg(layer.wo            , &w->wo[i*row_length*row_length]);
+//
+//     stuff_karpathy_weights_into_gg(layer.w1            , &w->w1[i*row_length*n_ff]);
+//     stuff_karpathy_weights_into_gg(layer.w2            , &w->w2[i*n_ff*row_length]);
+//     stuff_karpathy_weights_into_gg(layer.w3            , &w->w3[i*row_length*n_ff]);
+// }
+// // write tensors
+// write_tensor(&file, model->tok_embeddings);
+// write_tensor(&file, model->norm);
+// write_tensor(&file, model->output); // ?
+// for (uint32_t i = 0; i < model->hparams.n_layer; ++i) {
+//     auto & layer = model->layers[i];
+//
+//     write_tensor(&file, layer.attention_norm);
+//     write_tensor(&file, layer.wq);
+//     write_tensor(&file, layer.wk);
+//     write_tensor(&file, layer.wv);
+//     write_tensor(&file, layer.wo);
+//     write_tensor(&file, layer.ffn_norm);
+//     write_tensor(&file, layer.w1);
+//     write_tensor(&file, layer.w2);
+//     write_tensor(&file, layer.w3);
+// }
 }
 
 struct train_params get_default_train_params() {
diff --git a/examples/embd-input/embd-input-lib.cpp b/examples/embd-input/embd-input-lib.cpp
index 2185b9b0e..8a6ad882e 100644
--- a/examples/embd-input/embd-input-lib.cpp
+++ b/examples/embd-input/embd-input-lib.cpp
@@ -167,7 +167,7 @@ llama_token sampling_id(struct MyModel* mymodel) {
     llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };
 
     // TODO: Apply penalties
-    // float nl_logit = logits[llama_token_nl()];
+    // float nl_logit = logits[llama_token_nl(ctx)];
     // auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), repeat_last_n), n_ctx);
     // llama_sample_repetition_penalty(ctx, &candidates_p,
     //     last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
@@ -176,7 +176,7 @@ llama_token sampling_id(struct MyModel* mymodel) {
     //     last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
     //     last_n_repeat, alpha_frequency, alpha_presence);
     // if (!penalize_nl) {
-    //     logits[llama_token_nl()] = nl_logit;
+    //     logits[llama_token_nl(ctx)] = nl_logit;
     // }
 
     if (temp <= 0) {
@@ -211,7 +211,7 @@ const char * sampling(struct MyModel * mymodel) {
     llama_context * ctx = mymodel->ctx;
     int id = sampling_id(mymodel);
     static std::string ret;
-    if (id == llama_token_eos()) {
+    if (id == llama_token_eos(ctx)) {
         ret = "</s>";
     } else {
         ret = llama_token_to_str(ctx, id);
diff --git a/examples/embedding/embedding.cpp b/examples/embedding/embedding.cpp
index 5192d6df5..8788571cb 100644
--- a/examples/embedding/embedding.cpp
+++ b/examples/embedding/embedding.cpp
@@ -67,7 +67,7 @@ int main(int argc, char ** argv) {
     fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
     fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
     for (int i = 0; i < (int) embd_inp.size(); i++) {
-        fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_str(ctx, embd_inp[i]));
+        fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_str(ctx, embd_inp[i]).c_str());
     }
     fprintf(stderr, "\n");
 }
diff --git a/examples/gguf/gguf.cpp b/examples/gguf/gguf.cpp
new file mode 100644
index 000000000..dee00df87
--- /dev/null
+++ b/examples/gguf/gguf.cpp
@@ -0,0 +1,246 @@
+#include "ggml.h"
+#include "llama.h"
+
+#include <cstdio>
+#include <cinttypes>
+#include <string>
+#include <sstream>
+#include <fstream>
+#include <vector>
+
+#undef MIN
+#undef MAX
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+template<typename T>
+static std::string to_string(const T & val) {
+    std::stringstream ss;
+    ss << val;
+    return ss.str();
+}
+
+bool gguf_ex_write(const std::string & fname) {
+    struct gguf_context * ctx = gguf_init_empty();
+
+    gguf_set_val_u8  (ctx, "some.parameter.uint8",   0x12);
+    gguf_set_val_i8  (ctx, "some.parameter.int8",   -0x13);
+    gguf_set_val_u16 (ctx, "some.parameter.uint16",  0x1234);
+    gguf_set_val_i16 (ctx, "some.parameter.int16",  -0x1235);
+    gguf_set_val_u32 (ctx, "some.parameter.uint32",  0x12345678);
+    gguf_set_val_i32 (ctx, "some.parameter.int32",  -0x12345679);
+    gguf_set_val_f32 (ctx, "some.parameter.float32", 0.123456789f);
+    gguf_set_val_bool(ctx, "some.parameter.bool",    true);
+    gguf_set_val_str (ctx, "some.parameter.string",  "hello world");
+
+    gguf_set_arr_data(ctx, "some.parameter.arr.i16", GGUF_TYPE_INT16,   std::vector<int16_t>{ 1, 2, 3, 4, }.data(), 4);
+    gguf_set_arr_data(ctx, "some.parameter.arr.f32", GGUF_TYPE_FLOAT32, std::vector<float>{ 3.145f, 2.718f, 1.414f, }.data(), 3);
+    gguf_set_arr_str (ctx, "some.parameter.arr.str", std::vector<const char *>{ "hello", "world", "!" }.data(), 3);
+
+    struct ggml_init_params params = {
+        /*.mem_size   =*/ 128ull*1024ull*1024ull,
+        /*.mem_buffer =*/ NULL,
+        /*.no_alloc   =*/ false,
+    };
+
+    struct ggml_context * ctx_data = ggml_init(params);
+
+    const int n_tensors = 10;
+
+    // tensor infos
+    for (int i = 0; i < n_tensors; ++i) {
+        const std::string name = "tensor_" + to_string(i);
+
+        int64_t ne[GGML_MAX_DIMS] = { 1 };
+        int32_t n_dims = rand() % GGML_MAX_DIMS + 1;
+
+        for (int j = 0; j < n_dims; ++j) {
+            ne[j] = rand() % 10 + 1;
+        }
+
+        struct ggml_tensor * cur = ggml_new_tensor(ctx_data, GGML_TYPE_F32, n_dims, ne);
+        ggml_set_name(cur, name.c_str());
+
+        {
+            float * data = (float *) cur->data;
+            for (int j = 0; j < ggml_nelements(cur); ++j) {
+                data[j] = 100 + i;
+            }
+        }
+
+        gguf_add_tensor(ctx, cur);
+    }
+
+    gguf_write_to_file(ctx, fname.c_str(), false);
+
+    fprintf(stdout, "%s: wrote file '%s'\n", __func__, fname.c_str());
+
+    ggml_free(ctx_data);
+    gguf_free(ctx);
+
+    return true;
+}
+
+// just read tensor info
+bool gguf_ex_read_0(const std::string & fname) {
+    struct gguf_init_params params = {
+        /*.no_alloc = */ false,
+        /*.ctx      = */ NULL,
+    };
+
+    struct gguf_context * ctx = gguf_init_from_file(fname.c_str(), params);
+
+    fprintf(stdout, "%s: version: %d\n", __func__, gguf_get_version(ctx));
+    fprintf(stdout, "%s: alignment: %zu\n", __func__, gguf_get_alignment(ctx));
+    fprintf(stdout, "%s: data offset: %zu\n", __func__, gguf_get_data_offset(ctx));
+
+    // kv
+    {
+        const int n_kv = gguf_get_n_kv(ctx);
+
+        fprintf(stdout, "%s: n_kv: %d\n", __func__, n_kv);
+
+        for (int i = 0; i < n_kv; ++i) {
+            const char * key = gguf_get_key(ctx, i);
+
+            fprintf(stdout, "%s: kv[%d]: key = %s\n", __func__, i, key);
+        }
+    }
+
+    // find kv string
+    {
+        const char * findkey = "some.parameter.string";
+
+        const int keyidx = gguf_find_key(ctx, findkey);
+        if (keyidx == -1) {
+            fprintf(stdout, "%s: find key: %s not found.\n", __func__, findkey);
+        } else {
+            const char * key_value = gguf_get_val_str(ctx, keyidx);
+            fprintf(stdout, "%s: find key: %s found, kv[%d] value = %s\n", __func__, findkey, keyidx, key_value);
+        }
+    }
+
+    // tensor info
+    {
+        const int n_tensors = gguf_get_n_tensors(ctx);
+
+        fprintf(stdout, "%s: n_tensors: %d\n", __func__, n_tensors);
+
+        for (int i = 0; i < n_tensors; ++i) {
+            const char * name   = gguf_get_tensor_name  (ctx, i);
+            const size_t offset = gguf_get_tensor_offset(ctx, i);
+
+            fprintf(stdout, "%s: tensor[%d]: name = %s, offset = %zu\n", __func__, i, name, offset);
+        }
+    }
+
+    gguf_free(ctx);
+
+    return true;
+}
+
+// read and create ggml_context containing the tensors and their data
+bool gguf_ex_read_1(const std::string & fname) {
+    struct ggml_context * ctx_data = NULL;
+
+    struct gguf_init_params params = {
+        /*.no_alloc = */ false,
+        /*.ctx      = */ &ctx_data,
+    };
+
+    struct gguf_context * ctx = gguf_init_from_file(fname.c_str(), params);
+
+    fprintf(stdout, "%s: version: %d\n", __func__, gguf_get_version(ctx));
+    fprintf(stdout, "%s: alignment: %zu\n", __func__, gguf_get_alignment(ctx));
+    fprintf(stdout, "%s: data offset: %zu\n", __func__, gguf_get_data_offset(ctx));
+
+    // kv
+    {
+        const int n_kv = gguf_get_n_kv(ctx);
+
+        fprintf(stdout, "%s: n_kv: %d\n", __func__, n_kv);
+
+        for (int i = 0; i < n_kv; ++i) {
+            const char * key = gguf_get_key(ctx, i);
+
+            fprintf(stdout, "%s: kv[%d]: key = %s\n", __func__, i, key);
+        }
+    }
+
+    // tensor info
+    {
+        const int n_tensors = gguf_get_n_tensors(ctx);
+
+        fprintf(stdout, "%s: n_tensors: %d\n", __func__, n_tensors);
+
+        for (int i = 0; i < n_tensors; ++i) {
const char * name = gguf_get_tensor_name (ctx, i); + const size_t offset = gguf_get_tensor_offset(ctx, i); + + fprintf(stdout, "%s: tensor[%d]: name = %s, offset = %zu\n", __func__, i, name, offset); + } + } + + // data + { + const int n_tensors = gguf_get_n_tensors(ctx); + + for (int i = 0; i < n_tensors; ++i) { + fprintf(stdout, "%s: reading tensor %d data\n", __func__, i); + + const char * name = gguf_get_tensor_name(ctx, i); + + struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name); + + fprintf(stdout, "%s: tensor[%d]: n_dims = %d, name = %s, data = %p\n", __func__, i, cur->n_dims, cur->name, cur->data); + + // print first 10 elements + const float * data = (const float *) cur->data; + + printf("%s data[:10] : ", name); + for (int j = 0; j < MIN(10, ggml_nelements(cur)); ++j) { + printf("%f ", data[j]); + } + printf("\n\n"); + + // check data + { + const float * data = (const float *) cur->data; + for (int j = 0; j < ggml_nelements(cur); ++j) { + if (data[j] != 100 + i) { + fprintf(stderr, "%s: tensor[%d]: data[%d] = %f\n", __func__, i, j, data[j]); + return false; + } + } + } + } + } + + fprintf(stdout, "%s: ctx_data size: %zu\n", __func__, ggml_get_mem_size(ctx_data)); + + ggml_free(ctx_data); + gguf_free(ctx); + + return true; +} + +int main(int argc, char ** argv) { + if (argc < 3) { + fprintf(stdout, "usage: %s data.gguf r|w\n", argv[0]); + return -1; + } + + const std::string fname(argv[1]); + const std::string mode (argv[2]); + + GGML_ASSERT((mode == "r" || mode == "w") && "mode must be r or w"); + + if (mode == "w") { + GGML_ASSERT(gguf_ex_write(fname) && "failed to write gguf file"); + } else if (mode == "r") { + GGML_ASSERT(gguf_ex_read_0(fname) && "failed to read gguf file"); + GGML_ASSERT(gguf_ex_read_1(fname) && "failed to read gguf file"); + } + + return 0; +} diff --git a/examples/gptneox-wip/cmpnct_gpt2bpe.hpp b/examples/gptneox-wip/cmpnct_gpt2bpe.hpp new file mode 100644 index 000000000..9d433f4b1 --- /dev/null +++ b/examples/gptneox-wip/cmpnct_gpt2bpe.hpp @@ -0,0 +1,1133 @@ +#ifndef CMPNCT_GPT2BPE +#define CMPNCT_GPT2BPE + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +// Unicode GPT2 Byte Pair Encoding Tokenizer +// Adapted from https://github.com/cmp-nct/ggllm.cpp [MIT License] +// Removed loading of merges from HF json and parts made for a specific vocab + + +//----------------- +// Unicode library (from cmpnct_unicode.cpp) +//----------------- + +// Minimal library for high performance handling and categorization of UTF8 strings and characters +// Using std::string + +enum CNCTCharType { + DIGIT, // a numerical char in any language + LETTER, // a letter in any language + WHITESPACE, // any form of whitespace + ACCENT_MARK, // letter modifiers like ´ in é + PUNCTUATION, // punctuation including brackets + SYMBOL, // math, currency, other symbols + CONTROL, // control characters + MIXED, // a mix of the above + UNIDENTIFIED // something more exotic like emoji or separators +}; + +struct CNCTUnicode; + +struct CNCTString { + std::string str; + size_t utf8_chars; + + CNCTCharType char_type=UNIDENTIFIED; + bool is_sequential=false; + + size_t seq_offset_bytes=0; + size_t seq_offset_utf8_chars=0; + + bool operator==(const std::string &other) const; + bool operator==(const char other) const; + bool operator==(const CNCTString &other) const; + CNCTString &operator+=(const std::string &other); + CNCTString &operator+=(const char other); + friend CNCTString operator+(CNCTString lhs, const std::string &rhs); + friend 
CNCTString operator+(CNCTString lhs, const char rhs); + CNCTString& operator+=(const CNCTString& other); + friend CNCTString operator+(CNCTString lhs, const CNCTString& rhs); +}; + +struct CNCTUnicode { + static bool check_code_range(int c, const std::vector>& ranges); + static CNCTCharType get_code_type(int c); + static CNCTCharType get_code_type(const std::string &utf8_char); + static int utf8_len(const char c); + static int strlen_utf8(std::string src); + static std::vector split_utf8(const std::string &src); + static std::vector split_utf8_enhanced(const std::string &src); + static CNCTCharType string_identify(const std::string& str); + static bool string_test(const std::string& str, CNCTCharType chartype); +}; + +static const std::vector> digit_ranges = { +{0x30, 0x39}, {0xB2, 0xB3}, {0xB9, 0xB9}, {0x660, 0x669}, {0x6F0, 0x6F9}, {0x7C0, 0x7C9}, {0x966, 0x96F}, {0x9E6, 0x9EF}, {0xA66, 0xA6F}, {0xAE6, 0xAEF}, {0xB66, 0xB6F}, {0xBE6, 0xBEF}, {0xC66, 0xC6F}, +{0xCE6, 0xCEF}, {0xD66, 0xD6F}, {0xDE6, 0xDEF}, {0xE50, 0xE59}, {0xED0, 0xED9}, {0xF20, 0xF29}, {0x1040, 0x1049}, {0x1090, 0x1099}, {0x1369, 0x1371}, {0x17E0, 0x17E9}, {0x1810, 0x1819}, {0x1946, 0x194F}, +{0x19D0, 0x19DA}, {0x1A80, 0x1A89}, {0x1A90, 0x1A99}, {0x1B50, 0x1B59}, {0x1BB0, 0x1BB9}, {0x1C40, 0x1C49}, {0x1C50, 0x1C59}, {0x2070, 0x2070}, {0x2074, 0x2079}, {0x2080, 0x2089}, {0x2460, 0x2468}, +{0x2474, 0x247C}, {0x2488, 0x2490}, {0x24EA, 0x24EA}, {0x24F5, 0x24FD}, {0x24FF, 0x24FF}, {0x2776, 0x277E}, {0x2780, 0x2788}, {0x278A, 0x2792}, {0xA620, 0xA629}, {0xA8D0, 0xA8D9}, {0xA900, 0xA909}, +{0xA9D0, 0xA9D9}, {0xA9F0, 0xA9F9}, {0xAA50, 0xAA59}, {0xABF0, 0xABF9}, {0xFF10, 0xFF19}, {0x104A0, 0x104A9}, {0x10A40, 0x10A43}, {0x10D30, 0x10D39}, {0x10E60, 0x10E68}, {0x11052, 0x1105A}, +{0x11066, 0x1106F}, {0x110F0, 0x110F9}, {0x11136, 0x1113F}, {0x111D0, 0x111D9}, {0x112F0, 0x112F9}, {0x11450, 0x11459}, {0x114D0, 0x114D9}, {0x11650, 0x11659}, {0x116C0, 0x116C9}, {0x11730, 0x11739}, +{0x118E0, 0x118E9}, {0x11950, 0x11959}, {0x11C50, 0x11C59}, {0x11D50, 0x11D59}, {0x11DA0, 0x11DA9}, {0x16A60, 0x16A69}, {0x16B50, 0x16B59}, {0x1D7CE, 0x1D7FF}, {0x1E140, 0x1E149}, {0x1E2F0, 0x1E2F9}, +{0x1E950, 0x1E959}, {0x1F100, 0x1F10A}, {0x1FBF0, 0x1FBF9}, +}; + +static const std::vector> letter_ranges = { +{0x41, 0x5A}, {0x61, 0x7A}, {0xAA, 0xAA}, {0xB5, 0xB5}, {0xBA, 0xBA}, {0xC0, 0xD6}, {0xD8, 0xF6}, {0xF8, 0x2C1}, {0x2C6, 0x2D1}, {0x2E0, 0x2E4}, {0x2EC, 0x2EC}, {0x2EE, 0x2EE}, {0x370, 0x374}, +{0x376, 0x377}, {0x37A, 0x37D}, {0x37F, 0x37F}, {0x386, 0x386}, {0x388, 0x38A}, {0x38C, 0x38C}, {0x38E, 0x3A1}, {0x3A3, 0x3F5}, {0x3F7, 0x481}, {0x48A, 0x52F}, {0x531, 0x556}, {0x559, 0x559}, +{0x560, 0x588}, {0x5D0, 0x5EA}, {0x5EF, 0x5F2}, {0x620, 0x64A}, {0x66E, 0x66F}, {0x671, 0x6D3}, {0x6D5, 0x6D5}, {0x6E5, 0x6E6}, {0x6EE, 0x6EF}, {0x6FA, 0x6FC}, {0x6FF, 0x6FF}, {0x710, 0x710}, +{0x712, 0x72F}, {0x74D, 0x7A5}, {0x7B1, 0x7B1}, {0x7CA, 0x7EA}, {0x7F4, 0x7F5}, {0x7FA, 0x7FA}, {0x800, 0x815}, {0x81A, 0x81A}, {0x824, 0x824}, {0x828, 0x828}, {0x840, 0x858}, {0x860, 0x86A}, +{0x8A0, 0x8B4}, {0x8B6, 0x8C7}, {0x904, 0x939}, {0x93D, 0x93D}, {0x950, 0x950}, {0x958, 0x961}, {0x971, 0x980}, {0x985, 0x98C}, {0x98F, 0x990}, {0x993, 0x9A8}, {0x9AA, 0x9B0}, {0x9B2, 0x9B2}, +{0x9B6, 0x9B9}, {0x9BD, 0x9BD}, {0x9CE, 0x9CE}, {0x9DC, 0x9DD}, {0x9DF, 0x9E1}, {0x9F0, 0x9F1}, {0x9FC, 0x9FC}, {0xA05, 0xA0A}, {0xA0F, 0xA10}, {0xA13, 0xA28}, {0xA2A, 0xA30}, {0xA32, 0xA33}, +{0xA35, 0xA36}, {0xA38, 0xA39}, {0xA59, 0xA5C}, {0xA5E, 0xA5E}, {0xA72, 0xA74}, {0xA85, 0xA8D}, {0xA8F, 0xA91}, 
{0xA93, 0xAA8}, {0xAAA, 0xAB0}, {0xAB2, 0xAB3}, {0xAB5, 0xAB9}, {0xABD, 0xABD}, +{0xAD0, 0xAD0}, {0xAE0, 0xAE1}, {0xAF9, 0xAF9}, {0xB05, 0xB0C}, {0xB0F, 0xB10}, {0xB13, 0xB28}, {0xB2A, 0xB30}, {0xB32, 0xB33}, {0xB35, 0xB39}, {0xB3D, 0xB3D}, {0xB5C, 0xB5D}, {0xB5F, 0xB61}, +{0xB71, 0xB71}, {0xB83, 0xB83}, {0xB85, 0xB8A}, {0xB8E, 0xB90}, {0xB92, 0xB95}, {0xB99, 0xB9A}, {0xB9C, 0xB9C}, {0xB9E, 0xB9F}, {0xBA3, 0xBA4}, {0xBA8, 0xBAA}, {0xBAE, 0xBB9}, {0xBD0, 0xBD0}, +{0xC05, 0xC0C}, {0xC0E, 0xC10}, {0xC12, 0xC28}, {0xC2A, 0xC39}, {0xC3D, 0xC3D}, {0xC58, 0xC5A}, {0xC60, 0xC61}, {0xC80, 0xC80}, {0xC85, 0xC8C}, {0xC8E, 0xC90}, {0xC92, 0xCA8}, {0xCAA, 0xCB3}, +{0xCB5, 0xCB9}, {0xCBD, 0xCBD}, {0xCDE, 0xCDE}, {0xCE0, 0xCE1}, {0xCF1, 0xCF2}, {0xD04, 0xD0C}, {0xD0E, 0xD10}, {0xD12, 0xD3A}, {0xD3D, 0xD3D}, {0xD4E, 0xD4E}, {0xD54, 0xD56}, {0xD5F, 0xD61}, +{0xD7A, 0xD7F}, {0xD85, 0xD96}, {0xD9A, 0xDB1}, {0xDB3, 0xDBB}, {0xDBD, 0xDBD}, {0xDC0, 0xDC6}, {0xE01, 0xE30}, {0xE32, 0xE33}, {0xE40, 0xE46}, {0xE81, 0xE82}, {0xE84, 0xE84}, {0xE86, 0xE8A}, +{0xE8C, 0xEA3}, {0xEA5, 0xEA5}, {0xEA7, 0xEB0}, {0xEB2, 0xEB3}, {0xEBD, 0xEBD}, {0xEC0, 0xEC4}, {0xEC6, 0xEC6}, {0xEDC, 0xEDF}, {0xF00, 0xF00}, {0xF40, 0xF47}, {0xF49, 0xF6C}, {0xF88, 0xF8C}, +{0x1000, 0x102A}, {0x103F, 0x103F}, {0x1050, 0x1055}, {0x105A, 0x105D}, {0x1061, 0x1061}, {0x1065, 0x1066}, {0x106E, 0x1070}, {0x1075, 0x1081}, {0x108E, 0x108E}, {0x10A0, 0x10C5}, {0x10C7, 0x10C7}, +{0x10CD, 0x10CD}, {0x10D0, 0x10FA}, {0x10FC, 0x1248}, {0x124A, 0x124D}, {0x1250, 0x1256}, {0x1258, 0x1258}, {0x125A, 0x125D}, {0x1260, 0x1288}, {0x128A, 0x128D}, {0x1290, 0x12B0}, {0x12B2, 0x12B5}, +{0x12B8, 0x12BE}, {0x12C0, 0x12C0}, {0x12C2, 0x12C5}, {0x12C8, 0x12D6}, {0x12D8, 0x1310}, {0x1312, 0x1315}, {0x1318, 0x135A}, {0x1380, 0x138F}, {0x13A0, 0x13F5}, {0x13F8, 0x13FD}, {0x1401, 0x166C}, +{0x166F, 0x167F}, {0x1681, 0x169A}, {0x16A0, 0x16EA}, {0x16F1, 0x16F8}, {0x1700, 0x170C}, {0x170E, 0x1711}, {0x1720, 0x1731}, {0x1740, 0x1751}, {0x1760, 0x176C}, {0x176E, 0x1770}, {0x1780, 0x17B3}, +{0x17D7, 0x17D7}, {0x17DC, 0x17DC}, {0x1820, 0x1878}, {0x1880, 0x1884}, {0x1887, 0x18A8}, {0x18AA, 0x18AA}, {0x18B0, 0x18F5}, {0x1900, 0x191E}, {0x1950, 0x196D}, {0x1970, 0x1974}, {0x1980, 0x19AB}, +{0x19B0, 0x19C9}, {0x1A00, 0x1A16}, {0x1A20, 0x1A54}, {0x1AA7, 0x1AA7}, {0x1B05, 0x1B33}, {0x1B45, 0x1B4B}, {0x1B83, 0x1BA0}, {0x1BAE, 0x1BAF}, {0x1BBA, 0x1BE5}, {0x1C00, 0x1C23}, {0x1C4D, 0x1C4F}, +{0x1C5A, 0x1C7D}, {0x1C80, 0x1C88}, {0x1C90, 0x1CBA}, {0x1CBD, 0x1CBF}, {0x1CE9, 0x1CEC}, {0x1CEE, 0x1CF3}, {0x1CF5, 0x1CF6}, {0x1CFA, 0x1CFA}, {0x1D00, 0x1DBF}, {0x1E00, 0x1F15}, {0x1F18, 0x1F1D}, +{0x1F20, 0x1F45}, {0x1F48, 0x1F4D}, {0x1F50, 0x1F57}, {0x1F59, 0x1F59}, {0x1F5B, 0x1F5B}, {0x1F5D, 0x1F5D}, {0x1F5F, 0x1F7D}, {0x1F80, 0x1FB4}, {0x1FB6, 0x1FBC}, {0x1FBE, 0x1FBE}, {0x1FC2, 0x1FC4}, +{0x1FC6, 0x1FCC}, {0x1FD0, 0x1FD3}, {0x1FD6, 0x1FDB}, {0x1FE0, 0x1FEC}, {0x1FF2, 0x1FF4}, {0x1FF6, 0x1FFC}, {0x2071, 0x2071}, {0x207F, 0x207F}, {0x2090, 0x209C}, {0x2102, 0x2102}, {0x2107, 0x2107}, +{0x210A, 0x2113}, {0x2115, 0x2115}, {0x2119, 0x211D}, {0x2124, 0x2124}, {0x2126, 0x2126}, {0x2128, 0x2128}, {0x212A, 0x212D}, {0x212F, 0x2139}, {0x213C, 0x213F}, {0x2145, 0x2149}, {0x214E, 0x214E}, +{0x2183, 0x2184}, {0x2C00, 0x2C2E}, {0x2C30, 0x2C5E}, {0x2C60, 0x2CE4}, {0x2CEB, 0x2CEE}, {0x2CF2, 0x2CF3}, {0x2D00, 0x2D25}, {0x2D27, 0x2D27}, {0x2D2D, 0x2D2D}, {0x2D30, 0x2D67}, {0x2D6F, 0x2D6F}, +{0x2D80, 0x2D96}, {0x2DA0, 0x2DA6}, {0x2DA8, 0x2DAE}, {0x2DB0, 0x2DB6}, {0x2DB8, 0x2DBE}, {0x2DC0, 0x2DC6}, {0x2DC8, 0x2DCE}, 
{0x2DD0, 0x2DD6}, {0x2DD8, 0x2DDE}, {0x2E2F, 0x2E2F}, {0x3005, 0x3006}, +{0x3031, 0x3035}, {0x303B, 0x303C}, {0x3041, 0x3096}, {0x309D, 0x309F}, {0x30A1, 0x30FA}, {0x30FC, 0x30FF}, {0x3105, 0x312F}, {0x3131, 0x318E}, {0x31A0, 0x31BF}, {0x31F0, 0x31FF}, {0x3400, 0x4DBF}, +{0x4E00, 0x9FFC}, {0xA000, 0xA48C}, {0xA4D0, 0xA4FD}, {0xA500, 0xA60C}, {0xA610, 0xA61F}, {0xA62A, 0xA62B}, {0xA640, 0xA66E}, {0xA67F, 0xA69D}, {0xA6A0, 0xA6E5}, {0xA717, 0xA71F}, {0xA722, 0xA788}, +{0xA78B, 0xA7BF}, {0xA7C2, 0xA7CA}, {0xA7F5, 0xA801}, {0xA803, 0xA805}, {0xA807, 0xA80A}, {0xA80C, 0xA822}, {0xA840, 0xA873}, {0xA882, 0xA8B3}, {0xA8F2, 0xA8F7}, {0xA8FB, 0xA8FB}, {0xA8FD, 0xA8FE}, +{0xA90A, 0xA925}, {0xA930, 0xA946}, {0xA960, 0xA97C}, {0xA984, 0xA9B2}, {0xA9CF, 0xA9CF}, {0xA9E0, 0xA9E4}, {0xA9E6, 0xA9EF}, {0xA9FA, 0xA9FE}, {0xAA00, 0xAA28}, {0xAA40, 0xAA42}, {0xAA44, 0xAA4B}, +{0xAA60, 0xAA76}, {0xAA7A, 0xAA7A}, {0xAA7E, 0xAAAF}, {0xAAB1, 0xAAB1}, {0xAAB5, 0xAAB6}, {0xAAB9, 0xAABD}, {0xAAC0, 0xAAC0}, {0xAAC2, 0xAAC2}, {0xAADB, 0xAADD}, {0xAAE0, 0xAAEA}, {0xAAF2, 0xAAF4}, +{0xAB01, 0xAB06}, {0xAB09, 0xAB0E}, {0xAB11, 0xAB16}, {0xAB20, 0xAB26}, {0xAB28, 0xAB2E}, {0xAB30, 0xAB5A}, {0xAB5C, 0xAB69}, {0xAB70, 0xABE2}, {0xAC00, 0xD7A3}, {0xD7B0, 0xD7C6}, {0xD7CB, 0xD7FB}, +{0xF900, 0xFA6D}, {0xFA70, 0xFAD9}, {0xFB00, 0xFB06}, {0xFB13, 0xFB17}, {0xFB1D, 0xFB1D}, {0xFB1F, 0xFB28}, {0xFB2A, 0xFB36}, {0xFB38, 0xFB3C}, {0xFB3E, 0xFB3E}, {0xFB40, 0xFB41}, {0xFB43, 0xFB44}, +{0xFB46, 0xFBB1}, {0xFBD3, 0xFD3D}, {0xFD50, 0xFD8F}, {0xFD92, 0xFDC7}, {0xFDF0, 0xFDFB}, {0xFE70, 0xFE74}, {0xFE76, 0xFEFC}, {0xFF21, 0xFF3A}, {0xFF41, 0xFF5A}, {0xFF66, 0xFFBE}, {0xFFC2, 0xFFC7}, +{0xFFCA, 0xFFCF}, {0xFFD2, 0xFFD7}, {0xFFDA, 0xFFDC}, {0x10000, 0x1000B}, {0x1000D, 0x10026}, {0x10028, 0x1003A}, {0x1003C, 0x1003D}, {0x1003F, 0x1004D}, {0x10050, 0x1005D}, {0x10080, 0x100FA}, +{0x10280, 0x1029C}, {0x102A0, 0x102D0}, {0x10300, 0x1031F}, {0x1032D, 0x10340}, {0x10342, 0x10349}, {0x10350, 0x10375}, {0x10380, 0x1039D}, {0x103A0, 0x103C3}, {0x103C8, 0x103CF}, {0x10400, 0x1049D}, +{0x104B0, 0x104D3}, {0x104D8, 0x104FB}, {0x10500, 0x10527}, {0x10530, 0x10563}, {0x10600, 0x10736}, {0x10740, 0x10755}, {0x10760, 0x10767}, {0x10800, 0x10805}, {0x10808, 0x10808}, {0x1080A, 0x10835}, +{0x10837, 0x10838}, {0x1083C, 0x1083C}, {0x1083F, 0x10855}, {0x10860, 0x10876}, {0x10880, 0x1089E}, {0x108E0, 0x108F2}, {0x108F4, 0x108F5}, {0x10900, 0x10915}, {0x10920, 0x10939}, {0x10980, 0x109B7}, +{0x109BE, 0x109BF}, {0x10A00, 0x10A00}, {0x10A10, 0x10A13}, {0x10A15, 0x10A17}, {0x10A19, 0x10A35}, {0x10A60, 0x10A7C}, {0x10A80, 0x10A9C}, {0x10AC0, 0x10AC7}, {0x10AC9, 0x10AE4}, {0x10B00, 0x10B35}, +{0x10B40, 0x10B55}, {0x10B60, 0x10B72}, {0x10B80, 0x10B91}, {0x10C00, 0x10C48}, {0x10C80, 0x10CB2}, {0x10CC0, 0x10CF2}, {0x10D00, 0x10D23}, {0x10E80, 0x10EA9}, {0x10EB0, 0x10EB1}, {0x10F00, 0x10F1C}, +{0x10F27, 0x10F27}, {0x10F30, 0x10F45}, {0x10FB0, 0x10FC4}, {0x10FE0, 0x10FF6}, {0x11003, 0x11037}, {0x11083, 0x110AF}, {0x110D0, 0x110E8}, {0x11103, 0x11126}, {0x11144, 0x11144}, {0x11147, 0x11147}, +{0x11150, 0x11172}, {0x11176, 0x11176}, {0x11183, 0x111B2}, {0x111C1, 0x111C4}, {0x111DA, 0x111DA}, {0x111DC, 0x111DC}, {0x11200, 0x11211}, {0x11213, 0x1122B}, {0x11280, 0x11286}, {0x11288, 0x11288}, +{0x1128A, 0x1128D}, {0x1128F, 0x1129D}, {0x1129F, 0x112A8}, {0x112B0, 0x112DE}, {0x11305, 0x1130C}, {0x1130F, 0x11310}, {0x11313, 0x11328}, {0x1132A, 0x11330}, {0x11332, 0x11333}, {0x11335, 0x11339}, +{0x1133D, 0x1133D}, {0x11350, 0x11350}, {0x1135D, 0x11361}, {0x11400, 0x11434}, 
{0x11447, 0x1144A}, {0x1145F, 0x11461}, {0x11480, 0x114AF}, {0x114C4, 0x114C5}, {0x114C7, 0x114C7}, {0x11580, 0x115AE}, +{0x115D8, 0x115DB}, {0x11600, 0x1162F}, {0x11644, 0x11644}, {0x11680, 0x116AA}, {0x116B8, 0x116B8}, {0x11700, 0x1171A}, {0x11800, 0x1182B}, {0x118A0, 0x118DF}, {0x118FF, 0x11906}, {0x11909, 0x11909}, +{0x1190C, 0x11913}, {0x11915, 0x11916}, {0x11918, 0x1192F}, {0x1193F, 0x1193F}, {0x11941, 0x11941}, {0x119A0, 0x119A7}, {0x119AA, 0x119D0}, {0x119E1, 0x119E1}, {0x119E3, 0x119E3}, {0x11A00, 0x11A00}, +{0x11A0B, 0x11A32}, {0x11A3A, 0x11A3A}, {0x11A50, 0x11A50}, {0x11A5C, 0x11A89}, {0x11A9D, 0x11A9D}, {0x11AC0, 0x11AF8}, {0x11C00, 0x11C08}, {0x11C0A, 0x11C2E}, {0x11C40, 0x11C40}, {0x11C72, 0x11C8F}, +{0x11D00, 0x11D06}, {0x11D08, 0x11D09}, {0x11D0B, 0x11D30}, {0x11D46, 0x11D46}, {0x11D60, 0x11D65}, {0x11D67, 0x11D68}, {0x11D6A, 0x11D89}, {0x11D98, 0x11D98}, {0x11EE0, 0x11EF2}, {0x11FB0, 0x11FB0}, +{0x12000, 0x12399}, {0x12480, 0x12543}, {0x13000, 0x1342E}, {0x14400, 0x14646}, {0x16800, 0x16A38}, {0x16A40, 0x16A5E}, {0x16AD0, 0x16AED}, {0x16B00, 0x16B2F}, {0x16B40, 0x16B43}, {0x16B63, 0x16B77}, +{0x16B7D, 0x16B8F}, {0x16E40, 0x16E7F}, {0x16F00, 0x16F4A}, {0x16F50, 0x16F50}, {0x16F93, 0x16F9F}, {0x16FE0, 0x16FE1}, {0x16FE3, 0x16FE3}, {0x17000, 0x187F7}, {0x18800, 0x18CD5}, {0x18D00, 0x18D08}, +{0x1B000, 0x1B11E}, {0x1B150, 0x1B152}, {0x1B164, 0x1B167}, {0x1B170, 0x1B2FB}, {0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C}, {0x1BC80, 0x1BC88}, {0x1BC90, 0x1BC99}, {0x1D400, 0x1D454}, {0x1D456, 0x1D49C}, +{0x1D49E, 0x1D49F}, {0x1D4A2, 0x1D4A2}, {0x1D4A5, 0x1D4A6}, {0x1D4A9, 0x1D4AC}, {0x1D4AE, 0x1D4B9}, {0x1D4BB, 0x1D4BB}, {0x1D4BD, 0x1D4C3}, {0x1D4C5, 0x1D505}, {0x1D507, 0x1D50A}, {0x1D50D, 0x1D514}, +{0x1D516, 0x1D51C}, {0x1D51E, 0x1D539}, {0x1D53B, 0x1D53E}, {0x1D540, 0x1D544}, {0x1D546, 0x1D546}, {0x1D54A, 0x1D550}, {0x1D552, 0x1D6A5}, {0x1D6A8, 0x1D6C0}, {0x1D6C2, 0x1D6DA}, {0x1D6DC, 0x1D6FA}, +{0x1D6FC, 0x1D714}, {0x1D716, 0x1D734}, {0x1D736, 0x1D74E}, {0x1D750, 0x1D76E}, {0x1D770, 0x1D788}, {0x1D78A, 0x1D7A8}, {0x1D7AA, 0x1D7C2}, {0x1D7C4, 0x1D7CB}, {0x1E100, 0x1E12C}, {0x1E137, 0x1E13D}, +{0x1E14E, 0x1E14E}, {0x1E2C0, 0x1E2EB}, {0x1E800, 0x1E8C4}, {0x1E900, 0x1E943}, {0x1E94B, 0x1E94B}, {0x1EE00, 0x1EE03}, {0x1EE05, 0x1EE1F}, {0x1EE21, 0x1EE22}, {0x1EE24, 0x1EE24}, {0x1EE27, 0x1EE27}, +{0x1EE29, 0x1EE32}, {0x1EE34, 0x1EE37}, {0x1EE39, 0x1EE39}, {0x1EE3B, 0x1EE3B}, {0x1EE42, 0x1EE42}, {0x1EE47, 0x1EE47}, {0x1EE49, 0x1EE49}, {0x1EE4B, 0x1EE4B}, {0x1EE4D, 0x1EE4F}, {0x1EE51, 0x1EE52}, +{0x1EE54, 0x1EE54}, {0x1EE57, 0x1EE57}, {0x1EE59, 0x1EE59}, {0x1EE5B, 0x1EE5B}, {0x1EE5D, 0x1EE5D}, {0x1EE5F, 0x1EE5F}, {0x1EE61, 0x1EE62}, {0x1EE64, 0x1EE64}, {0x1EE67, 0x1EE6A}, {0x1EE6C, 0x1EE72}, +{0x1EE74, 0x1EE77}, {0x1EE79, 0x1EE7C}, {0x1EE7E, 0x1EE7E}, {0x1EE80, 0x1EE89}, {0x1EE8B, 0x1EE9B}, {0x1EEA1, 0x1EEA3}, {0x1EEA5, 0x1EEA9}, {0x1EEAB, 0x1EEBB}, {0x20000, 0x2A6DD}, {0x2A700, 0x2B734}, +{0x2B740, 0x2B81D}, {0x2B820, 0x2CEA1}, {0x2CEB0, 0x2EBE0}, {0x2F800, 0x2FA1D}, {0x30000, 0x3134A}, +}; + +static const std::vector> whitespace_ranges = { +{0x9, 0xD}, {0x1C, 0x20}, {0x85, 0x85}, {0xA0, 0xA0}, {0x1680, 0x1680}, {0x2000, 0x200A}, {0x2028, 0x2029}, {0x202F, 0x202F}, {0x205F, 0x205F}, {0x3000, 0x3000}, +}; + +static const std::vector> accent_mark_ranges = { +{0x300, 0x36F}, {0x483, 0x489}, {0x591, 0x5BD}, {0x5BF, 0x5BF}, {0x5C1, 0x5C2}, {0x5C4, 0x5C5}, {0x5C7, 0x5C7}, {0x610, 0x61A}, {0x64B, 0x65F}, {0x670, 0x670}, {0x6D6, 0x6DC}, {0x6DF, 0x6E4}, +{0x6E7, 0x6E8}, {0x6EA, 0x6ED}, {0x711, 0x711}, 
{0x730, 0x74A}, {0x7A6, 0x7B0}, {0x7EB, 0x7F3}, {0x7FD, 0x7FD}, {0x816, 0x819}, {0x81B, 0x823}, {0x825, 0x827}, {0x829, 0x82D}, {0x859, 0x85B}, +{0x8D3, 0x8E1}, {0x8E3, 0x903}, {0x93A, 0x93C}, {0x93E, 0x94F}, {0x951, 0x957}, {0x962, 0x963}, {0x981, 0x983}, {0x9BC, 0x9BC}, {0x9BE, 0x9C4}, {0x9C7, 0x9C8}, {0x9CB, 0x9CD}, {0x9D7, 0x9D7}, +{0x9E2, 0x9E3}, {0x9FE, 0x9FE}, {0xA01, 0xA03}, {0xA3C, 0xA3C}, {0xA3E, 0xA42}, {0xA47, 0xA48}, {0xA4B, 0xA4D}, {0xA51, 0xA51}, {0xA70, 0xA71}, {0xA75, 0xA75}, {0xA81, 0xA83}, {0xABC, 0xABC}, +{0xABE, 0xAC5}, {0xAC7, 0xAC9}, {0xACB, 0xACD}, {0xAE2, 0xAE3}, {0xAFA, 0xAFF}, {0xB01, 0xB03}, {0xB3C, 0xB3C}, {0xB3E, 0xB44}, {0xB47, 0xB48}, {0xB4B, 0xB4D}, {0xB55, 0xB57}, {0xB62, 0xB63}, +{0xB82, 0xB82}, {0xBBE, 0xBC2}, {0xBC6, 0xBC8}, {0xBCA, 0xBCD}, {0xBD7, 0xBD7}, {0xC00, 0xC04}, {0xC3E, 0xC44}, {0xC46, 0xC48}, {0xC4A, 0xC4D}, {0xC55, 0xC56}, {0xC62, 0xC63}, {0xC81, 0xC83}, +{0xCBC, 0xCBC}, {0xCBE, 0xCC4}, {0xCC6, 0xCC8}, {0xCCA, 0xCCD}, {0xCD5, 0xCD6}, {0xCE2, 0xCE3}, {0xD00, 0xD03}, {0xD3B, 0xD3C}, {0xD3E, 0xD44}, {0xD46, 0xD48}, {0xD4A, 0xD4D}, {0xD57, 0xD57}, +{0xD62, 0xD63}, {0xD81, 0xD83}, {0xDCA, 0xDCA}, {0xDCF, 0xDD4}, {0xDD6, 0xDD6}, {0xDD8, 0xDDF}, {0xDF2, 0xDF3}, {0xE31, 0xE31}, {0xE34, 0xE3A}, {0xE47, 0xE4E}, {0xEB1, 0xEB1}, {0xEB4, 0xEBC}, +{0xEC8, 0xECD}, {0xF18, 0xF19}, {0xF35, 0xF35}, {0xF37, 0xF37}, {0xF39, 0xF39}, {0xF3E, 0xF3F}, {0xF71, 0xF84}, {0xF86, 0xF87}, {0xF8D, 0xF97}, {0xF99, 0xFBC}, {0xFC6, 0xFC6}, {0x102B, 0x103E}, +{0x1056, 0x1059}, {0x105E, 0x1060}, {0x1062, 0x1064}, {0x1067, 0x106D}, {0x1071, 0x1074}, {0x1082, 0x108D}, {0x108F, 0x108F}, {0x109A, 0x109D}, {0x135D, 0x135F}, {0x1712, 0x1714}, {0x1732, 0x1734}, +{0x1752, 0x1753}, {0x1772, 0x1773}, {0x17B4, 0x17D3}, {0x17DD, 0x17DD}, {0x180B, 0x180D}, {0x1885, 0x1886}, {0x18A9, 0x18A9}, {0x1920, 0x192B}, {0x1930, 0x193B}, {0x1A17, 0x1A1B}, {0x1A55, 0x1A5E}, +{0x1A60, 0x1A7C}, {0x1A7F, 0x1A7F}, {0x1AB0, 0x1AC0}, {0x1B00, 0x1B04}, {0x1B34, 0x1B44}, {0x1B6B, 0x1B73}, {0x1B80, 0x1B82}, {0x1BA1, 0x1BAD}, {0x1BE6, 0x1BF3}, {0x1C24, 0x1C37}, {0x1CD0, 0x1CD2}, +{0x1CD4, 0x1CE8}, {0x1CED, 0x1CED}, {0x1CF4, 0x1CF4}, {0x1CF7, 0x1CF9}, {0x1DC0, 0x1DF9}, {0x1DFB, 0x1DFF}, {0x20D0, 0x20F0}, {0x2CEF, 0x2CF1}, {0x2D7F, 0x2D7F}, {0x2DE0, 0x2DFF}, {0x302A, 0x302F}, +{0x3099, 0x309A}, {0xA66F, 0xA672}, {0xA674, 0xA67D}, {0xA69E, 0xA69F}, {0xA6F0, 0xA6F1}, {0xA802, 0xA802}, {0xA806, 0xA806}, {0xA80B, 0xA80B}, {0xA823, 0xA827}, {0xA82C, 0xA82C}, {0xA880, 0xA881}, +{0xA8B4, 0xA8C5}, {0xA8E0, 0xA8F1}, {0xA8FF, 0xA8FF}, {0xA926, 0xA92D}, {0xA947, 0xA953}, {0xA980, 0xA983}, {0xA9B3, 0xA9C0}, {0xA9E5, 0xA9E5}, {0xAA29, 0xAA36}, {0xAA43, 0xAA43}, {0xAA4C, 0xAA4D}, +{0xAA7B, 0xAA7D}, {0xAAB0, 0xAAB0}, {0xAAB2, 0xAAB4}, {0xAAB7, 0xAAB8}, {0xAABE, 0xAABF}, {0xAAC1, 0xAAC1}, {0xAAEB, 0xAAEF}, {0xAAF5, 0xAAF6}, {0xABE3, 0xABEA}, {0xABEC, 0xABED}, {0xFB1E, 0xFB1E}, +{0xFE00, 0xFE0F}, {0xFE20, 0xFE2F}, {0x101FD, 0x101FD}, {0x102E0, 0x102E0}, {0x10376, 0x1037A}, {0x10A01, 0x10A03}, {0x10A05, 0x10A06}, {0x10A0C, 0x10A0F}, {0x10A38, 0x10A3A}, {0x10A3F, 0x10A3F}, +{0x10AE5, 0x10AE6}, {0x10D24, 0x10D27}, {0x10EAB, 0x10EAC}, {0x10F46, 0x10F50}, {0x11000, 0x11002}, {0x11038, 0x11046}, {0x1107F, 0x11082}, {0x110B0, 0x110BA}, {0x11100, 0x11102}, {0x11127, 0x11134}, +{0x11145, 0x11146}, {0x11173, 0x11173}, {0x11180, 0x11182}, {0x111B3, 0x111C0}, {0x111C9, 0x111CC}, {0x111CE, 0x111CF}, {0x1122C, 0x11237}, {0x1123E, 0x1123E}, {0x112DF, 0x112EA}, {0x11300, 0x11303}, +{0x1133B, 0x1133C}, {0x1133E, 0x11344}, {0x11347, 0x11348}, 
{0x1134B, 0x1134D}, {0x11357, 0x11357}, {0x11362, 0x11363}, {0x11366, 0x1136C}, {0x11370, 0x11374}, {0x11435, 0x11446}, {0x1145E, 0x1145E}, +{0x114B0, 0x114C3}, {0x115AF, 0x115B5}, {0x115B8, 0x115C0}, {0x115DC, 0x115DD}, {0x11630, 0x11640}, {0x116AB, 0x116B7}, {0x1171D, 0x1172B}, {0x1182C, 0x1183A}, {0x11930, 0x11935}, {0x11937, 0x11938}, +{0x1193B, 0x1193E}, {0x11940, 0x11940}, {0x11942, 0x11943}, {0x119D1, 0x119D7}, {0x119DA, 0x119E0}, {0x119E4, 0x119E4}, {0x11A01, 0x11A0A}, {0x11A33, 0x11A39}, {0x11A3B, 0x11A3E}, {0x11A47, 0x11A47}, +{0x11A51, 0x11A5B}, {0x11A8A, 0x11A99}, {0x11C2F, 0x11C36}, {0x11C38, 0x11C3F}, {0x11C92, 0x11CA7}, {0x11CA9, 0x11CB6}, {0x11D31, 0x11D36}, {0x11D3A, 0x11D3A}, {0x11D3C, 0x11D3D}, {0x11D3F, 0x11D45}, +{0x11D47, 0x11D47}, {0x11D8A, 0x11D8E}, {0x11D90, 0x11D91}, {0x11D93, 0x11D97}, {0x11EF3, 0x11EF6}, {0x16AF0, 0x16AF4}, {0x16B30, 0x16B36}, {0x16F4F, 0x16F4F}, {0x16F51, 0x16F87}, {0x16F8F, 0x16F92}, +{0x16FE4, 0x16FE4}, {0x16FF0, 0x16FF1}, {0x1BC9D, 0x1BC9E}, {0x1D165, 0x1D169}, {0x1D16D, 0x1D172}, {0x1D17B, 0x1D182}, {0x1D185, 0x1D18B}, {0x1D1AA, 0x1D1AD}, {0x1D242, 0x1D244}, {0x1DA00, 0x1DA36}, +{0x1DA3B, 0x1DA6C}, {0x1DA75, 0x1DA75}, {0x1DA84, 0x1DA84}, {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, {0x1E000, 0x1E006}, {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, {0x1E026, 0x1E02A}, +{0x1E130, 0x1E136}, {0x1E2EC, 0x1E2EF}, {0x1E8D0, 0x1E8D6}, {0x1E944, 0x1E94A}, {0xE0100, 0xE01EF}, +}; + +static const std::vector> punctuation_ranges = { +{0x21, 0x23}, {0x25, 0x2A}, {0x2C, 0x2F}, {0x3A, 0x3B}, {0x3F, 0x40}, {0x5B, 0x5D}, {0x5F, 0x5F}, {0x7B, 0x7B}, {0x7D, 0x7D}, {0xA1, 0xA1}, {0xA7, 0xA7}, {0xAB, 0xAB}, {0xB6, 0xB7}, {0xBB, 0xBB}, +{0xBF, 0xBF}, {0x37E, 0x37E}, {0x387, 0x387}, {0x55A, 0x55F}, {0x589, 0x58A}, {0x5BE, 0x5BE}, {0x5C0, 0x5C0}, {0x5C3, 0x5C3}, {0x5C6, 0x5C6}, {0x5F3, 0x5F4}, {0x609, 0x60A}, {0x60C, 0x60D}, +{0x61B, 0x61B}, {0x61E, 0x61F}, {0x66A, 0x66D}, {0x6D4, 0x6D4}, {0x700, 0x70D}, {0x7F7, 0x7F9}, {0x830, 0x83E}, {0x85E, 0x85E}, {0x964, 0x965}, {0x970, 0x970}, {0x9FD, 0x9FD}, {0xA76, 0xA76}, +{0xAF0, 0xAF0}, {0xC77, 0xC77}, {0xC84, 0xC84}, {0xDF4, 0xDF4}, {0xE4F, 0xE4F}, {0xE5A, 0xE5B}, {0xF04, 0xF12}, {0xF14, 0xF14}, {0xF3A, 0xF3D}, {0xF85, 0xF85}, {0xFD0, 0xFD4}, {0xFD9, 0xFDA}, +{0x104A, 0x104F}, {0x10FB, 0x10FB}, {0x1360, 0x1368}, {0x1400, 0x1400}, {0x166E, 0x166E}, {0x169B, 0x169C}, {0x16EB, 0x16ED}, {0x1735, 0x1736}, {0x17D4, 0x17D6}, {0x17D8, 0x17DA}, {0x1800, 0x180A}, +{0x1944, 0x1945}, {0x1A1E, 0x1A1F}, {0x1AA0, 0x1AA6}, {0x1AA8, 0x1AAD}, {0x1B5A, 0x1B60}, {0x1BFC, 0x1BFF}, {0x1C3B, 0x1C3F}, {0x1C7E, 0x1C7F}, {0x1CC0, 0x1CC7}, {0x1CD3, 0x1CD3}, {0x2010, 0x2027}, +{0x2030, 0x2043}, {0x2045, 0x2051}, {0x2053, 0x205E}, {0x207D, 0x207E}, {0x208D, 0x208E}, {0x2308, 0x230B}, {0x2329, 0x232A}, {0x2768, 0x2775}, {0x27C5, 0x27C6}, {0x27E6, 0x27EF}, {0x2983, 0x2998}, +{0x29D8, 0x29DB}, {0x29FC, 0x29FD}, {0x2CF9, 0x2CFC}, {0x2CFE, 0x2CFF}, {0x2D70, 0x2D70}, {0x2E00, 0x2E2E}, {0x2E30, 0x2E4F}, {0x2E52, 0x2E52}, {0x3001, 0x3003}, {0x3008, 0x3011}, {0x3014, 0x301F}, +{0x3030, 0x3030}, {0x303D, 0x303D}, {0x30A0, 0x30A0}, {0x30FB, 0x30FB}, {0xA4FE, 0xA4FF}, {0xA60D, 0xA60F}, {0xA673, 0xA673}, {0xA67E, 0xA67E}, {0xA6F2, 0xA6F7}, {0xA874, 0xA877}, {0xA8CE, 0xA8CF}, +{0xA8F8, 0xA8FA}, {0xA8FC, 0xA8FC}, {0xA92E, 0xA92F}, {0xA95F, 0xA95F}, {0xA9C1, 0xA9CD}, {0xA9DE, 0xA9DF}, {0xAA5C, 0xAA5F}, {0xAADE, 0xAADF}, {0xAAF0, 0xAAF1}, {0xABEB, 0xABEB}, {0xFD3E, 0xFD3F}, +{0xFE10, 0xFE19}, {0xFE30, 0xFE52}, {0xFE54, 0xFE61}, {0xFE63, 0xFE63}, {0xFE68, 
0xFE68}, {0xFE6A, 0xFE6B}, {0xFF01, 0xFF03}, {0xFF05, 0xFF0A}, {0xFF0C, 0xFF0F}, {0xFF1A, 0xFF1B}, {0xFF1F, 0xFF20}, +{0xFF3B, 0xFF3D}, {0xFF3F, 0xFF3F}, {0xFF5B, 0xFF5B}, {0xFF5D, 0xFF5D}, {0xFF5F, 0xFF65}, {0x10100, 0x10102}, {0x1039F, 0x1039F}, {0x103D0, 0x103D0}, {0x1056F, 0x1056F}, {0x10857, 0x10857}, +{0x1091F, 0x1091F}, {0x1093F, 0x1093F}, {0x10A50, 0x10A58}, {0x10A7F, 0x10A7F}, {0x10AF0, 0x10AF6}, {0x10B39, 0x10B3F}, {0x10B99, 0x10B9C}, {0x10EAD, 0x10EAD}, {0x10F55, 0x10F59}, {0x11047, 0x1104D}, +{0x110BB, 0x110BC}, {0x110BE, 0x110C1}, {0x11140, 0x11143}, {0x11174, 0x11175}, {0x111C5, 0x111C8}, {0x111CD, 0x111CD}, {0x111DB, 0x111DB}, {0x111DD, 0x111DF}, {0x11238, 0x1123D}, {0x112A9, 0x112A9}, +{0x1144B, 0x1144F}, {0x1145A, 0x1145B}, {0x1145D, 0x1145D}, {0x114C6, 0x114C6}, {0x115C1, 0x115D7}, {0x11641, 0x11643}, {0x11660, 0x1166C}, {0x1173C, 0x1173E}, {0x1183B, 0x1183B}, {0x11944, 0x11946}, +{0x119E2, 0x119E2}, {0x11A3F, 0x11A46}, {0x11A9A, 0x11A9C}, {0x11A9E, 0x11AA2}, {0x11C41, 0x11C45}, {0x11C70, 0x11C71}, {0x11EF7, 0x11EF8}, {0x11FFF, 0x11FFF}, {0x12470, 0x12474}, {0x16A6E, 0x16A6F}, +{0x16AF5, 0x16AF5}, {0x16B37, 0x16B3B}, {0x16B44, 0x16B44}, {0x16E97, 0x16E9A}, {0x16FE2, 0x16FE2}, {0x1BC9F, 0x1BC9F}, {0x1DA87, 0x1DA8B}, {0x1E95E, 0x1E95F}, +}; + +static const std::vector> symbol_ranges = { +{0x24, 0x24}, {0x2B, 0x2B}, {0x3C, 0x3E}, {0x5E, 0x5E}, {0x60, 0x60}, {0x7C, 0x7C}, {0x7E, 0x7E}, {0xA2, 0xA6}, {0xA8, 0xA9}, {0xAC, 0xAC}, {0xAE, 0xB1}, {0xB4, 0xB4}, {0xB8, 0xB8}, {0xD7, 0xD7}, +{0xF7, 0xF7}, {0x2C2, 0x2C5}, {0x2D2, 0x2DF}, {0x2E5, 0x2EB}, {0x2ED, 0x2ED}, {0x2EF, 0x2FF}, {0x375, 0x375}, {0x384, 0x385}, {0x3F6, 0x3F6}, {0x482, 0x482}, {0x58D, 0x58F}, {0x606, 0x608}, +{0x60B, 0x60B}, {0x60E, 0x60F}, {0x6DE, 0x6DE}, {0x6E9, 0x6E9}, {0x6FD, 0x6FE}, {0x7F6, 0x7F6}, {0x7FE, 0x7FF}, {0x9F2, 0x9F3}, {0x9FA, 0x9FB}, {0xAF1, 0xAF1}, {0xB70, 0xB70}, {0xBF3, 0xBFA}, +{0xC7F, 0xC7F}, {0xD4F, 0xD4F}, {0xD79, 0xD79}, {0xE3F, 0xE3F}, {0xF01, 0xF03}, {0xF13, 0xF13}, {0xF15, 0xF17}, {0xF1A, 0xF1F}, {0xF34, 0xF34}, {0xF36, 0xF36}, {0xF38, 0xF38}, {0xFBE, 0xFC5}, +{0xFC7, 0xFCC}, {0xFCE, 0xFCF}, {0xFD5, 0xFD8}, {0x109E, 0x109F}, {0x1390, 0x1399}, {0x166D, 0x166D}, {0x17DB, 0x17DB}, {0x1940, 0x1940}, {0x19DE, 0x19FF}, {0x1B61, 0x1B6A}, {0x1B74, 0x1B7C}, +{0x1FBD, 0x1FBD}, {0x1FBF, 0x1FC1}, {0x1FCD, 0x1FCF}, {0x1FDD, 0x1FDF}, {0x1FED, 0x1FEF}, {0x1FFD, 0x1FFE}, {0x2044, 0x2044}, {0x2052, 0x2052}, {0x207A, 0x207C}, {0x208A, 0x208C}, {0x20A0, 0x20BF}, +{0x2100, 0x2101}, {0x2103, 0x2106}, {0x2108, 0x2109}, {0x2114, 0x2114}, {0x2116, 0x2118}, {0x211E, 0x2123}, {0x2125, 0x2125}, {0x2127, 0x2127}, {0x2129, 0x2129}, {0x212E, 0x212E}, {0x213A, 0x213B}, +{0x2140, 0x2144}, {0x214A, 0x214D}, {0x214F, 0x214F}, {0x218A, 0x218B}, {0x2190, 0x2307}, {0x230C, 0x2328}, {0x232B, 0x2426}, {0x2440, 0x244A}, {0x249C, 0x24E9}, {0x2500, 0x2767}, {0x2794, 0x27C4}, +{0x27C7, 0x27E5}, {0x27F0, 0x2982}, {0x2999, 0x29D7}, {0x29DC, 0x29FB}, {0x29FE, 0x2B73}, {0x2B76, 0x2B95}, {0x2B97, 0x2BFF}, {0x2CE5, 0x2CEA}, {0x2E50, 0x2E51}, {0x2E80, 0x2E99}, {0x2E9B, 0x2EF3}, +{0x2F00, 0x2FD5}, {0x2FF0, 0x2FFB}, {0x3004, 0x3004}, {0x3012, 0x3013}, {0x3020, 0x3020}, {0x3036, 0x3037}, {0x303E, 0x303F}, {0x309B, 0x309C}, {0x3190, 0x3191}, {0x3196, 0x319F}, {0x31C0, 0x31E3}, +{0x3200, 0x321E}, {0x322A, 0x3247}, {0x3250, 0x3250}, {0x3260, 0x327F}, {0x328A, 0x32B0}, {0x32C0, 0x33FF}, {0x4DC0, 0x4DFF}, {0xA490, 0xA4C6}, {0xA700, 0xA716}, {0xA720, 0xA721}, {0xA789, 0xA78A}, +{0xA828, 0xA82B}, {0xA836, 0xA839}, {0xAA77, 0xAA79}, {0xAB5B, 
0xAB5B}, {0xAB6A, 0xAB6B}, {0xFB29, 0xFB29}, {0xFBB2, 0xFBC1}, {0xFDFC, 0xFDFD}, {0xFE62, 0xFE62}, {0xFE64, 0xFE66}, {0xFE69, 0xFE69}, +{0xFF04, 0xFF04}, {0xFF0B, 0xFF0B}, {0xFF1C, 0xFF1E}, {0xFF3E, 0xFF3E}, {0xFF40, 0xFF40}, {0xFF5C, 0xFF5C}, {0xFF5E, 0xFF5E}, {0xFFE0, 0xFFE6}, {0xFFE8, 0xFFEE}, {0xFFFC, 0xFFFD}, {0x10137, 0x1013F}, +{0x10179, 0x10189}, {0x1018C, 0x1018E}, {0x10190, 0x1019C}, {0x101A0, 0x101A0}, {0x101D0, 0x101FC}, {0x10877, 0x10878}, {0x10AC8, 0x10AC8}, {0x1173F, 0x1173F}, {0x11FD5, 0x11FF1}, {0x16B3C, 0x16B3F}, +{0x16B45, 0x16B45}, {0x1BC9C, 0x1BC9C}, {0x1D000, 0x1D0F5}, {0x1D100, 0x1D126}, {0x1D129, 0x1D164}, {0x1D16A, 0x1D16C}, {0x1D183, 0x1D184}, {0x1D18C, 0x1D1A9}, {0x1D1AE, 0x1D1E8}, {0x1D200, 0x1D241}, +{0x1D245, 0x1D245}, {0x1D300, 0x1D356}, {0x1D6C1, 0x1D6C1}, {0x1D6DB, 0x1D6DB}, {0x1D6FB, 0x1D6FB}, {0x1D715, 0x1D715}, {0x1D735, 0x1D735}, {0x1D74F, 0x1D74F}, {0x1D76F, 0x1D76F}, {0x1D789, 0x1D789}, +{0x1D7A9, 0x1D7A9}, {0x1D7C3, 0x1D7C3}, {0x1D800, 0x1D9FF}, {0x1DA37, 0x1DA3A}, {0x1DA6D, 0x1DA74}, {0x1DA76, 0x1DA83}, {0x1DA85, 0x1DA86}, {0x1E14F, 0x1E14F}, {0x1E2FF, 0x1E2FF}, {0x1ECAC, 0x1ECAC}, +{0x1ECB0, 0x1ECB0}, {0x1ED2E, 0x1ED2E}, {0x1EEF0, 0x1EEF1}, {0x1F000, 0x1F02B}, {0x1F030, 0x1F093}, {0x1F0A0, 0x1F0AE}, {0x1F0B1, 0x1F0BF}, {0x1F0C1, 0x1F0CF}, {0x1F0D1, 0x1F0F5}, {0x1F10D, 0x1F1AD}, +{0x1F1E6, 0x1F202}, {0x1F210, 0x1F23B}, {0x1F240, 0x1F248}, {0x1F250, 0x1F251}, {0x1F260, 0x1F265}, {0x1F300, 0x1F6D7}, {0x1F6E0, 0x1F6EC}, {0x1F6F0, 0x1F6FC}, {0x1F700, 0x1F773}, {0x1F780, 0x1F7D8}, +{0x1F7E0, 0x1F7EB}, {0x1F800, 0x1F80B}, {0x1F810, 0x1F847}, {0x1F850, 0x1F859}, {0x1F860, 0x1F887}, {0x1F890, 0x1F8AD}, {0x1F8B0, 0x1F8B1}, {0x1F900, 0x1F978}, {0x1F97A, 0x1F9CB}, {0x1F9CD, 0x1FA53}, +{0x1FA60, 0x1FA6D}, {0x1FA70, 0x1FA74}, {0x1FA78, 0x1FA7A}, {0x1FA80, 0x1FA86}, {0x1FA90, 0x1FAA8}, {0x1FAB0, 0x1FAB6}, {0x1FAC0, 0x1FAC2}, {0x1FAD0, 0x1FAD6}, {0x1FB00, 0x1FB92}, {0x1FB94, 0x1FBCA}, +}; + +static const std::vector> control_ranges = { +{0x0, 0x8}, {0xE, 0x1B}, {0x7F, 0x84}, {0x86, 0x9F}, {0xAD, 0xAD}, {0x378, 0x379}, {0x380, 0x383}, {0x38B, 0x38B}, {0x38D, 0x38D}, {0x3A2, 0x3A2}, {0x530, 0x530}, {0x557, 0x558}, {0x58B, 0x58C}, +{0x590, 0x590}, {0x5C8, 0x5CF}, {0x5EB, 0x5EE}, {0x5F5, 0x605}, {0x61C, 0x61D}, {0x6DD, 0x6DD}, {0x70E, 0x70F}, {0x74B, 0x74C}, {0x7B2, 0x7BF}, {0x7FB, 0x7FC}, {0x82E, 0x82F}, {0x83F, 0x83F}, +{0x85C, 0x85D}, {0x85F, 0x85F}, {0x86B, 0x89F}, {0x8B5, 0x8B5}, {0x8C8, 0x8D2}, {0x8E2, 0x8E2}, {0x984, 0x984}, {0x98D, 0x98E}, {0x991, 0x992}, {0x9A9, 0x9A9}, {0x9B1, 0x9B1}, {0x9B3, 0x9B5}, +{0x9BA, 0x9BB}, {0x9C5, 0x9C6}, {0x9C9, 0x9CA}, {0x9CF, 0x9D6}, {0x9D8, 0x9DB}, {0x9DE, 0x9DE}, {0x9E4, 0x9E5}, {0x9FF, 0xA00}, {0xA04, 0xA04}, {0xA0B, 0xA0E}, {0xA11, 0xA12}, {0xA29, 0xA29}, +{0xA31, 0xA31}, {0xA34, 0xA34}, {0xA37, 0xA37}, {0xA3A, 0xA3B}, {0xA3D, 0xA3D}, {0xA43, 0xA46}, {0xA49, 0xA4A}, {0xA4E, 0xA50}, {0xA52, 0xA58}, {0xA5D, 0xA5D}, {0xA5F, 0xA65}, {0xA77, 0xA80}, +{0xA84, 0xA84}, {0xA8E, 0xA8E}, {0xA92, 0xA92}, {0xAA9, 0xAA9}, {0xAB1, 0xAB1}, {0xAB4, 0xAB4}, {0xABA, 0xABB}, {0xAC6, 0xAC6}, {0xACA, 0xACA}, {0xACE, 0xACF}, {0xAD1, 0xADF}, {0xAE4, 0xAE5}, +{0xAF2, 0xAF8}, {0xB00, 0xB00}, {0xB04, 0xB04}, {0xB0D, 0xB0E}, {0xB11, 0xB12}, {0xB29, 0xB29}, {0xB31, 0xB31}, {0xB34, 0xB34}, {0xB3A, 0xB3B}, {0xB45, 0xB46}, {0xB49, 0xB4A}, {0xB4E, 0xB54}, +{0xB58, 0xB5B}, {0xB5E, 0xB5E}, {0xB64, 0xB65}, {0xB78, 0xB81}, {0xB84, 0xB84}, {0xB8B, 0xB8D}, {0xB91, 0xB91}, {0xB96, 0xB98}, {0xB9B, 0xB9B}, {0xB9D, 0xB9D}, {0xBA0, 0xBA2}, {0xBA5, 0xBA7}, +{0xBAB, 
0xBAD}, {0xBBA, 0xBBD}, {0xBC3, 0xBC5}, {0xBC9, 0xBC9}, {0xBCE, 0xBCF}, {0xBD1, 0xBD6}, {0xBD8, 0xBE5}, {0xBFB, 0xBFF}, {0xC0D, 0xC0D}, {0xC11, 0xC11}, {0xC29, 0xC29}, {0xC3A, 0xC3C}, +{0xC45, 0xC45}, {0xC49, 0xC49}, {0xC4E, 0xC54}, {0xC57, 0xC57}, {0xC5B, 0xC5F}, {0xC64, 0xC65}, {0xC70, 0xC76}, {0xC8D, 0xC8D}, {0xC91, 0xC91}, {0xCA9, 0xCA9}, {0xCB4, 0xCB4}, {0xCBA, 0xCBB}, +{0xCC5, 0xCC5}, {0xCC9, 0xCC9}, {0xCCE, 0xCD4}, {0xCD7, 0xCDD}, {0xCDF, 0xCDF}, {0xCE4, 0xCE5}, {0xCF0, 0xCF0}, {0xCF3, 0xCFF}, {0xD0D, 0xD0D}, {0xD11, 0xD11}, {0xD45, 0xD45}, {0xD49, 0xD49}, +{0xD50, 0xD53}, {0xD64, 0xD65}, {0xD80, 0xD80}, {0xD84, 0xD84}, {0xD97, 0xD99}, {0xDB2, 0xDB2}, {0xDBC, 0xDBC}, {0xDBE, 0xDBF}, {0xDC7, 0xDC9}, {0xDCB, 0xDCE}, {0xDD5, 0xDD5}, {0xDD7, 0xDD7}, +{0xDE0, 0xDE5}, {0xDF0, 0xDF1}, {0xDF5, 0xE00}, {0xE3B, 0xE3E}, {0xE5C, 0xE80}, {0xE83, 0xE83}, {0xE85, 0xE85}, {0xE8B, 0xE8B}, {0xEA4, 0xEA4}, {0xEA6, 0xEA6}, {0xEBE, 0xEBF}, {0xEC5, 0xEC5}, +{0xEC7, 0xEC7}, {0xECE, 0xECF}, {0xEDA, 0xEDB}, {0xEE0, 0xEFF}, {0xF48, 0xF48}, {0xF6D, 0xF70}, {0xF98, 0xF98}, {0xFBD, 0xFBD}, {0xFCD, 0xFCD}, {0xFDB, 0xFFF}, {0x10C6, 0x10C6}, {0x10C8, 0x10CC}, +{0x10CE, 0x10CF}, {0x1249, 0x1249}, {0x124E, 0x124F}, {0x1257, 0x1257}, {0x1259, 0x1259}, {0x125E, 0x125F}, {0x1289, 0x1289}, {0x128E, 0x128F}, {0x12B1, 0x12B1}, {0x12B6, 0x12B7}, {0x12BF, 0x12BF}, +{0x12C1, 0x12C1}, {0x12C6, 0x12C7}, {0x12D7, 0x12D7}, {0x1311, 0x1311}, {0x1316, 0x1317}, {0x135B, 0x135C}, {0x137D, 0x137F}, {0x139A, 0x139F}, {0x13F6, 0x13F7}, {0x13FE, 0x13FF}, {0x169D, 0x169F}, +{0x16F9, 0x16FF}, {0x170D, 0x170D}, {0x1715, 0x171F}, {0x1737, 0x173F}, {0x1754, 0x175F}, {0x176D, 0x176D}, {0x1771, 0x1771}, {0x1774, 0x177F}, {0x17DE, 0x17DF}, {0x17EA, 0x17EF}, {0x17FA, 0x17FF}, +{0x180E, 0x180F}, {0x181A, 0x181F}, {0x1879, 0x187F}, {0x18AB, 0x18AF}, {0x18F6, 0x18FF}, {0x191F, 0x191F}, {0x192C, 0x192F}, {0x193C, 0x193F}, {0x1941, 0x1943}, {0x196E, 0x196F}, {0x1975, 0x197F}, +{0x19AC, 0x19AF}, {0x19CA, 0x19CF}, {0x19DB, 0x19DD}, {0x1A1C, 0x1A1D}, {0x1A5F, 0x1A5F}, {0x1A7D, 0x1A7E}, {0x1A8A, 0x1A8F}, {0x1A9A, 0x1A9F}, {0x1AAE, 0x1AAF}, {0x1AC1, 0x1AFF}, {0x1B4C, 0x1B4F}, +{0x1B7D, 0x1B7F}, {0x1BF4, 0x1BFB}, {0x1C38, 0x1C3A}, {0x1C4A, 0x1C4C}, {0x1C89, 0x1C8F}, {0x1CBB, 0x1CBC}, {0x1CC8, 0x1CCF}, {0x1CFB, 0x1CFF}, {0x1DFA, 0x1DFA}, {0x1F16, 0x1F17}, {0x1F1E, 0x1F1F}, +{0x1F46, 0x1F47}, {0x1F4E, 0x1F4F}, {0x1F58, 0x1F58}, {0x1F5A, 0x1F5A}, {0x1F5C, 0x1F5C}, {0x1F5E, 0x1F5E}, {0x1F7E, 0x1F7F}, {0x1FB5, 0x1FB5}, {0x1FC5, 0x1FC5}, {0x1FD4, 0x1FD5}, {0x1FDC, 0x1FDC}, +{0x1FF0, 0x1FF1}, {0x1FF5, 0x1FF5}, {0x1FFF, 0x1FFF}, {0x200B, 0x200F}, {0x202A, 0x202E}, {0x2060, 0x206F}, {0x2072, 0x2073}, {0x208F, 0x208F}, {0x209D, 0x209F}, {0x20C0, 0x20CF}, {0x20F1, 0x20FF}, +{0x218C, 0x218F}, {0x2427, 0x243F}, {0x244B, 0x245F}, {0x2B74, 0x2B75}, {0x2B96, 0x2B96}, {0x2C2F, 0x2C2F}, {0x2C5F, 0x2C5F}, {0x2CF4, 0x2CF8}, {0x2D26, 0x2D26}, {0x2D28, 0x2D2C}, {0x2D2E, 0x2D2F}, +{0x2D68, 0x2D6E}, {0x2D71, 0x2D7E}, {0x2D97, 0x2D9F}, {0x2DA7, 0x2DA7}, {0x2DAF, 0x2DAF}, {0x2DB7, 0x2DB7}, {0x2DBF, 0x2DBF}, {0x2DC7, 0x2DC7}, {0x2DCF, 0x2DCF}, {0x2DD7, 0x2DD7}, {0x2DDF, 0x2DDF}, +{0x2E53, 0x2E7F}, {0x2E9A, 0x2E9A}, {0x2EF4, 0x2EFF}, {0x2FD6, 0x2FEF}, {0x2FFC, 0x2FFF}, {0x3040, 0x3040}, {0x3097, 0x3098}, {0x3100, 0x3104}, {0x3130, 0x3130}, {0x318F, 0x318F}, {0x31E4, 0x31EF}, +{0x321F, 0x321F}, {0x9FFD, 0x9FFF}, {0xA48D, 0xA48F}, {0xA4C7, 0xA4CF}, {0xA62C, 0xA63F}, {0xA6F8, 0xA6FF}, {0xA7C0, 0xA7C1}, {0xA7CB, 0xA7F4}, {0xA82D, 0xA82F}, {0xA83A, 0xA83F}, {0xA878, 0xA87F}, +{0xA8C6, 
0xA8CD}, {0xA8DA, 0xA8DF}, {0xA954, 0xA95E}, {0xA97D, 0xA97F}, {0xA9CE, 0xA9CE}, {0xA9DA, 0xA9DD}, {0xA9FF, 0xA9FF}, {0xAA37, 0xAA3F}, {0xAA4E, 0xAA4F}, {0xAA5A, 0xAA5B}, {0xAAC3, 0xAADA}, +{0xAAF7, 0xAB00}, {0xAB07, 0xAB08}, {0xAB0F, 0xAB10}, {0xAB17, 0xAB1F}, {0xAB27, 0xAB27}, {0xAB2F, 0xAB2F}, {0xAB6C, 0xAB6F}, {0xABEE, 0xABEF}, {0xABFA, 0xABFF}, {0xD7A4, 0xD7AF}, {0xD7C7, 0xD7CA}, +{0xD7FC, 0xF8FF}, {0xFA6E, 0xFA6F}, {0xFADA, 0xFAFF}, {0xFB07, 0xFB12}, {0xFB18, 0xFB1C}, {0xFB37, 0xFB37}, {0xFB3D, 0xFB3D}, {0xFB3F, 0xFB3F}, {0xFB42, 0xFB42}, {0xFB45, 0xFB45}, {0xFBC2, 0xFBD2}, +{0xFD40, 0xFD4F}, {0xFD90, 0xFD91}, {0xFDC8, 0xFDEF}, {0xFDFE, 0xFDFF}, {0xFE1A, 0xFE1F}, {0xFE53, 0xFE53}, {0xFE67, 0xFE67}, {0xFE6C, 0xFE6F}, {0xFE75, 0xFE75}, {0xFEFD, 0xFF00}, {0xFFBF, 0xFFC1}, +{0xFFC8, 0xFFC9}, {0xFFD0, 0xFFD1}, {0xFFD8, 0xFFD9}, {0xFFDD, 0xFFDF}, {0xFFE7, 0xFFE7}, {0xFFEF, 0xFFFB}, {0xFFFE, 0xFFFF}, {0x1000C, 0x1000C}, {0x10027, 0x10027}, {0x1003B, 0x1003B}, +{0x1003E, 0x1003E}, {0x1004E, 0x1004F}, {0x1005E, 0x1007F}, {0x100FB, 0x100FF}, {0x10103, 0x10106}, {0x10134, 0x10136}, {0x1018F, 0x1018F}, {0x1019D, 0x1019F}, {0x101A1, 0x101CF}, {0x101FE, 0x1027F}, +{0x1029D, 0x1029F}, {0x102D1, 0x102DF}, {0x102FC, 0x102FF}, {0x10324, 0x1032C}, {0x1034B, 0x1034F}, {0x1037B, 0x1037F}, {0x1039E, 0x1039E}, {0x103C4, 0x103C7}, {0x103D6, 0x103FF}, {0x1049E, 0x1049F}, +{0x104AA, 0x104AF}, {0x104D4, 0x104D7}, {0x104FC, 0x104FF}, {0x10528, 0x1052F}, {0x10564, 0x1056E}, {0x10570, 0x105FF}, {0x10737, 0x1073F}, {0x10756, 0x1075F}, {0x10768, 0x107FF}, {0x10806, 0x10807}, +{0x10809, 0x10809}, {0x10836, 0x10836}, {0x10839, 0x1083B}, {0x1083D, 0x1083E}, {0x10856, 0x10856}, {0x1089F, 0x108A6}, {0x108B0, 0x108DF}, {0x108F3, 0x108F3}, {0x108F6, 0x108FA}, {0x1091C, 0x1091E}, +{0x1093A, 0x1093E}, {0x10940, 0x1097F}, {0x109B8, 0x109BB}, {0x109D0, 0x109D1}, {0x10A04, 0x10A04}, {0x10A07, 0x10A0B}, {0x10A14, 0x10A14}, {0x10A18, 0x10A18}, {0x10A36, 0x10A37}, {0x10A3B, 0x10A3E}, +{0x10A49, 0x10A4F}, {0x10A59, 0x10A5F}, {0x10AA0, 0x10ABF}, {0x10AE7, 0x10AEA}, {0x10AF7, 0x10AFF}, {0x10B36, 0x10B38}, {0x10B56, 0x10B57}, {0x10B73, 0x10B77}, {0x10B92, 0x10B98}, {0x10B9D, 0x10BA8}, +{0x10BB0, 0x10BFF}, {0x10C49, 0x10C7F}, {0x10CB3, 0x10CBF}, {0x10CF3, 0x10CF9}, {0x10D28, 0x10D2F}, {0x10D3A, 0x10E5F}, {0x10E7F, 0x10E7F}, {0x10EAA, 0x10EAA}, {0x10EAE, 0x10EAF}, {0x10EB2, 0x10EFF}, +{0x10F28, 0x10F2F}, {0x10F5A, 0x10FAF}, {0x10FCC, 0x10FDF}, {0x10FF7, 0x10FFF}, {0x1104E, 0x11051}, {0x11070, 0x1107E}, {0x110BD, 0x110BD}, {0x110C2, 0x110CF}, {0x110E9, 0x110EF}, {0x110FA, 0x110FF}, +{0x11135, 0x11135}, {0x11148, 0x1114F}, {0x11177, 0x1117F}, {0x111E0, 0x111E0}, {0x111F5, 0x111FF}, {0x11212, 0x11212}, {0x1123F, 0x1127F}, {0x11287, 0x11287}, {0x11289, 0x11289}, {0x1128E, 0x1128E}, +{0x1129E, 0x1129E}, {0x112AA, 0x112AF}, {0x112EB, 0x112EF}, {0x112FA, 0x112FF}, {0x11304, 0x11304}, {0x1130D, 0x1130E}, {0x11311, 0x11312}, {0x11329, 0x11329}, {0x11331, 0x11331}, {0x11334, 0x11334}, +{0x1133A, 0x1133A}, {0x11345, 0x11346}, {0x11349, 0x1134A}, {0x1134E, 0x1134F}, {0x11351, 0x11356}, {0x11358, 0x1135C}, {0x11364, 0x11365}, {0x1136D, 0x1136F}, {0x11375, 0x113FF}, {0x1145C, 0x1145C}, +{0x11462, 0x1147F}, {0x114C8, 0x114CF}, {0x114DA, 0x1157F}, {0x115B6, 0x115B7}, {0x115DE, 0x115FF}, {0x11645, 0x1164F}, {0x1165A, 0x1165F}, {0x1166D, 0x1167F}, {0x116B9, 0x116BF}, {0x116CA, 0x116FF}, +{0x1171B, 0x1171C}, {0x1172C, 0x1172F}, {0x11740, 0x117FF}, {0x1183C, 0x1189F}, {0x118F3, 0x118FE}, {0x11907, 0x11908}, {0x1190A, 0x1190B}, {0x11914, 0x11914}, 
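+// NOTE: besides actual control/format characters, these ranges sweep in unassigned, surrogate
+// and private-use code points, so such characters classify as CONTROL rather than UNIDENTIFIED.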
{0x11917, 0x11917}, {0x11936, 0x11936}, +{0x11939, 0x1193A}, {0x11947, 0x1194F}, {0x1195A, 0x1199F}, {0x119A8, 0x119A9}, {0x119D8, 0x119D9}, {0x119E5, 0x119FF}, {0x11A48, 0x11A4F}, {0x11AA3, 0x11ABF}, {0x11AF9, 0x11BFF}, {0x11C09, 0x11C09}, +{0x11C37, 0x11C37}, {0x11C46, 0x11C4F}, {0x11C6D, 0x11C6F}, {0x11C90, 0x11C91}, {0x11CA8, 0x11CA8}, {0x11CB7, 0x11CFF}, {0x11D07, 0x11D07}, {0x11D0A, 0x11D0A}, {0x11D37, 0x11D39}, {0x11D3B, 0x11D3B}, +{0x11D3E, 0x11D3E}, {0x11D48, 0x11D4F}, {0x11D5A, 0x11D5F}, {0x11D66, 0x11D66}, {0x11D69, 0x11D69}, {0x11D8F, 0x11D8F}, {0x11D92, 0x11D92}, {0x11D99, 0x11D9F}, {0x11DAA, 0x11EDF}, {0x11EF9, 0x11FAF}, +{0x11FB1, 0x11FBF}, {0x11FF2, 0x11FFE}, {0x1239A, 0x123FF}, {0x1246F, 0x1246F}, {0x12475, 0x1247F}, {0x12544, 0x12FFF}, {0x1342F, 0x143FF}, {0x14647, 0x167FF}, {0x16A39, 0x16A3F}, {0x16A5F, 0x16A5F}, +{0x16A6A, 0x16A6D}, {0x16A70, 0x16ACF}, {0x16AEE, 0x16AEF}, {0x16AF6, 0x16AFF}, {0x16B46, 0x16B4F}, {0x16B5A, 0x16B5A}, {0x16B62, 0x16B62}, {0x16B78, 0x16B7C}, {0x16B90, 0x16E3F}, {0x16E9B, 0x16EFF}, +{0x16F4B, 0x16F4E}, {0x16F88, 0x16F8E}, {0x16FA0, 0x16FDF}, {0x16FE5, 0x16FEF}, {0x16FF2, 0x16FFF}, {0x187F8, 0x187FF}, {0x18CD6, 0x18CFF}, {0x18D09, 0x1AFFF}, {0x1B11F, 0x1B14F}, {0x1B153, 0x1B163}, +{0x1B168, 0x1B16F}, {0x1B2FC, 0x1BBFF}, {0x1BC6B, 0x1BC6F}, {0x1BC7D, 0x1BC7F}, {0x1BC89, 0x1BC8F}, {0x1BC9A, 0x1BC9B}, {0x1BCA0, 0x1CFFF}, {0x1D0F6, 0x1D0FF}, {0x1D127, 0x1D128}, {0x1D173, 0x1D17A}, +{0x1D1E9, 0x1D1FF}, {0x1D246, 0x1D2DF}, {0x1D2F4, 0x1D2FF}, {0x1D357, 0x1D35F}, {0x1D379, 0x1D3FF}, {0x1D455, 0x1D455}, {0x1D49D, 0x1D49D}, {0x1D4A0, 0x1D4A1}, {0x1D4A3, 0x1D4A4}, {0x1D4A7, 0x1D4A8}, +{0x1D4AD, 0x1D4AD}, {0x1D4BA, 0x1D4BA}, {0x1D4BC, 0x1D4BC}, {0x1D4C4, 0x1D4C4}, {0x1D506, 0x1D506}, {0x1D50B, 0x1D50C}, {0x1D515, 0x1D515}, {0x1D51D, 0x1D51D}, {0x1D53A, 0x1D53A}, {0x1D53F, 0x1D53F}, +{0x1D545, 0x1D545}, {0x1D547, 0x1D549}, {0x1D551, 0x1D551}, {0x1D6A6, 0x1D6A7}, {0x1D7CC, 0x1D7CD}, {0x1DA8C, 0x1DA9A}, {0x1DAA0, 0x1DAA0}, {0x1DAB0, 0x1DFFF}, {0x1E007, 0x1E007}, {0x1E019, 0x1E01A}, +{0x1E022, 0x1E022}, {0x1E025, 0x1E025}, {0x1E02B, 0x1E0FF}, {0x1E12D, 0x1E12F}, {0x1E13E, 0x1E13F}, {0x1E14A, 0x1E14D}, {0x1E150, 0x1E2BF}, {0x1E2FA, 0x1E2FE}, {0x1E300, 0x1E7FF}, {0x1E8C5, 0x1E8C6}, +{0x1E8D7, 0x1E8FF}, {0x1E94C, 0x1E94F}, {0x1E95A, 0x1E95D}, {0x1E960, 0x1EC70}, {0x1ECB5, 0x1ED00}, {0x1ED3E, 0x1EDFF}, {0x1EE04, 0x1EE04}, {0x1EE20, 0x1EE20}, {0x1EE23, 0x1EE23}, {0x1EE25, 0x1EE26}, +{0x1EE28, 0x1EE28}, {0x1EE33, 0x1EE33}, {0x1EE38, 0x1EE38}, {0x1EE3A, 0x1EE3A}, {0x1EE3C, 0x1EE41}, {0x1EE43, 0x1EE46}, {0x1EE48, 0x1EE48}, {0x1EE4A, 0x1EE4A}, {0x1EE4C, 0x1EE4C}, {0x1EE50, 0x1EE50}, +{0x1EE53, 0x1EE53}, {0x1EE55, 0x1EE56}, {0x1EE58, 0x1EE58}, {0x1EE5A, 0x1EE5A}, {0x1EE5C, 0x1EE5C}, {0x1EE5E, 0x1EE5E}, {0x1EE60, 0x1EE60}, {0x1EE63, 0x1EE63}, {0x1EE65, 0x1EE66}, {0x1EE6B, 0x1EE6B}, +{0x1EE73, 0x1EE73}, {0x1EE78, 0x1EE78}, {0x1EE7D, 0x1EE7D}, {0x1EE7F, 0x1EE7F}, {0x1EE8A, 0x1EE8A}, {0x1EE9C, 0x1EEA0}, {0x1EEA4, 0x1EEA4}, {0x1EEAA, 0x1EEAA}, {0x1EEBC, 0x1EEEF}, {0x1EEF2, 0x1EFFF}, +{0x1F02C, 0x1F02F}, {0x1F094, 0x1F09F}, {0x1F0AF, 0x1F0B0}, {0x1F0C0, 0x1F0C0}, {0x1F0D0, 0x1F0D0}, {0x1F0F6, 0x1F0FF}, {0x1F1AE, 0x1F1E5}, {0x1F203, 0x1F20F}, {0x1F23C, 0x1F23F}, {0x1F249, 0x1F24F}, +{0x1F252, 0x1F25F}, {0x1F266, 0x1F2FF}, {0x1F6D8, 0x1F6DF}, {0x1F6ED, 0x1F6EF}, {0x1F6FD, 0x1F6FF}, {0x1F774, 0x1F77F}, {0x1F7D9, 0x1F7DF}, {0x1F7EC, 0x1F7FF}, {0x1F80C, 0x1F80F}, {0x1F848, 0x1F84F}, +{0x1F85A, 0x1F85F}, {0x1F888, 0x1F88F}, {0x1F8AE, 0x1F8AF}, {0x1F8B2, 0x1F8FF}, {0x1F979, 
0x1F979}, {0x1F9CC, 0x1F9CC}, {0x1FA54, 0x1FA5F}, {0x1FA6E, 0x1FA6F}, {0x1FA75, 0x1FA77}, {0x1FA7B, 0x1FA7F}, +{0x1FA87, 0x1FA8F}, {0x1FAA9, 0x1FAAF}, {0x1FAB7, 0x1FABF}, {0x1FAC3, 0x1FACF}, {0x1FAD7, 0x1FAFF}, {0x1FB93, 0x1FB93}, {0x1FBCB, 0x1FBEF}, {0x1FBFA, 0x1FFFF}, {0x2A6DE, 0x2A6FF}, {0x2B735, 0x2B73F}, +{0x2B81E, 0x2B81F}, {0x2CEA2, 0x2CEAF}, {0x2EBE1, 0x2F7FF}, {0x2FA1E, 0x2FFFF}, {0x3134B, 0xE00FF}, {0xE01F0, 0x10FFFF}, +}; + +//String +bool CNCTString::operator==(const std::string& other) const { + return str.compare(other) == 0; +} +bool CNCTString::operator==(const char other) const { + return str.compare(std::string(1, other)) == 0; +} +bool CNCTString::operator==(const CNCTString& other) const { + return str.compare(other.str) == 0; +} +// + operators +CNCTString& CNCTString::operator+=(const std::string& other) { + str += other; + int new_len = CNCTUnicode::strlen_utf8(other); + utf8_chars += new_len; + char_type = CNCTUnicode::string_identify(str); + seq_offset_bytes += other.size(); + seq_offset_utf8_chars += new_len; + return *this; +} + +CNCTString& CNCTString::operator+=(const char other) { + std::string str = std::string(1, other); + *this += str; + return *this; +} + +CNCTString& CNCTString::operator+=(const CNCTString& other) { + str += other.str; + utf8_chars += other.utf8_chars; + char_type = CNCTUnicode::string_identify(str); + seq_offset_bytes += other.str.size(); + seq_offset_utf8_chars += other.utf8_chars; + return *this; +} + +struct CRCompare { + bool operator()(const std::pair& p, int i) { + return p.second < i; + } + bool operator()(int i, const std::pair& p) { + return i < p.first; + } +}; + +// binary search for code range +bool CNCTUnicode::check_code_range(int c, const std::vector> &ranges) { + auto it = std::upper_bound(ranges.begin(), ranges.end(), c, CRCompare()); + if (it != ranges.begin()) { + --it; + } + return c >= it->first && c <= it->second; +} + +// these are binary searches, it takes only a few operations +CNCTCharType CNCTUnicode::get_code_type(int c) { + if (check_code_range(c, letter_ranges)) { + return LETTER; + } + if (check_code_range(c, digit_ranges)) { + return DIGIT; + } + if (check_code_range(c, whitespace_ranges)) { + return WHITESPACE; + } + if (check_code_range(c, punctuation_ranges)) { + return PUNCTUATION; + } + if (check_code_range(c, symbol_ranges)) { + return SYMBOL; + } + if (check_code_range(c, accent_mark_ranges)) { + return ACCENT_MARK; + } + if (check_code_range(c, control_ranges)) { + return CONTROL; + } + return UNIDENTIFIED; +} + +static int utf8_to_unicode(const std::string& utf8_char) { + int c = 0; + int len = (int)utf8_char.size(); + if (len == 1) { + c = utf8_char[0]; + } else if (len == 2) { + c = ((utf8_char[0] & 0x1F) << 6) | (utf8_char[1] & 0x3F); + } else if (len == 3) { + c = ((utf8_char[0] & 0x0F) << 12) | ((utf8_char[1] & 0x3F) << 6) | (utf8_char[2] & 0x3F); + } else if (len == 4) { + c = ((utf8_char[0] & 0x07) << 18) | ((utf8_char[1] & 0x3F) << 12) | ((utf8_char[2] & 0x3F) << 6) | (utf8_char[3] & 0x3F); + } + return c; +} + +CNCTCharType CNCTUnicode::get_code_type(const std::string &utf8_char) { + return get_code_type(utf8_to_unicode(utf8_char)); +} + +int CNCTUnicode::utf8_len(const char c) +{ + if ((c & 0x80) == 0) { + return 1; // ASCII character + } + if ((c & 0xE0) == 0xC0) { + return 2; // 2-byte character + } + if ((c & 0xF0) == 0xE0) { + return 3; // 3-byte character + } + if ((c & 0xF0) == 0xF0) { + return 4; // 4-byte character + } + return 1; // not valid utf8 + // static const uint8_t 
lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
+    // return lookup[static_cast<uint8_t>(c) >> 4];
+}
+
+int CNCTUnicode::strlen_utf8(const std::string src) {
+    int len = 0;
+    for (std::string::const_iterator it = src.begin(); it != src.end(); ++it) {
+        int char_len = utf8_len(*it);
+        if (char_len > 1) {
+            it += char_len - 1;
+        }
+        len += 1;
+    }
+    return len;
+}
+
+// split a string into unicode strings
+std::vector<std::string> CNCTUnicode::split_utf8(const std::string &src) {
+    std::vector<std::string> result;
+    for (std::string::const_iterator it = src.begin(); it != src.end(); ++it) {
+        int char_len = utf8_len(*it);
+        std::string str(it, it + char_len);
+        result.push_back(str);
+        if (char_len > 1) {
+            it += char_len - 1;
+        }
+    }
+    return result;
+}
+
+// split a string into unicode strings (CNCTString) with sequence information
+std::vector<CNCTString> CNCTUnicode::split_utf8_enhanced(const std::string &src) {
+    std::vector<CNCTString> result;
+    int seq_offset_bytes=0;
+    int seq_offset_utf8_chars=0;
+    for (std::string::const_iterator it = src.begin(); it != src.end(); ++it) {
+        int char_len = utf8_len(*it);
+        std::string str(it, it + char_len);
+        CNCTString cnct_str;
+        cnct_str.seq_offset_bytes = seq_offset_bytes;
+        cnct_str.seq_offset_utf8_chars = seq_offset_utf8_chars;
+        cnct_str.str = str;
+        cnct_str.utf8_chars = 1;
+        cnct_str.char_type = get_code_type(str);
+        #if 0
+        switch (cnct_str.char_type)
+        {
+            case DIGIT:
+                printf("%s = DIGIT\n", str.c_str());
+                break;
+            case LETTER:
+                printf("%s = LETTER\n", str.c_str());
+                break;
+            case WHITESPACE:
+                printf("%s = WHITESPACE\n", str.c_str());
+                break;
+            case PUNCTUATION:
+                printf("%s = PUNCTUATION\n", str.c_str());
+                break;
+            case UNIDENTIFIED:
+                printf("%s = UNIDENTIFIED\n", str.c_str());
+                break;
+            case SYMBOL:
+                printf("%s = SYMBOL\n", str.c_str());
+                break;
+            case CONTROL:
+                printf("%s = CONTROL\n", str.c_str());
+                break;
+        }
+        #endif
+
+        result.push_back(cnct_str);
+        seq_offset_bytes += char_len;
+        seq_offset_utf8_chars += 1;
+        if (char_len > 1) {
+            it += char_len - 1;
+        }
+
+    }
+    return result;
+}
+
+// return the type of the string
+CNCTCharType CNCTUnicode::string_identify(const std::string &str) {
+    CNCTCharType result = UNIDENTIFIED;
+    std::string::const_iterator it = str.begin();
+    while (it != str.end()) {
+        int len = utf8_len(*it);
+        // decode the code point properly instead of packing raw UTF-8 bytes into an int,
+        // so that multi-byte characters are matched against the correct ranges
+        std::string utf8_char;
+        for (int i = 0; i < len && it != str.end(); ++i, ++it) {
+            utf8_char += *it;
+        }
+        switch (get_code_type(utf8_char)) {
+            case DIGIT:
+                if (result == UNIDENTIFIED) {
+                    result = DIGIT;
+                } else if (result != DIGIT) {
+                    return MIXED;
+                }
+                break;
+            case LETTER:
+                if (result == UNIDENTIFIED) {
+                    result = LETTER;
+                } else if (result != LETTER) {
+                    return MIXED;
+                }
+                break;
+            case WHITESPACE:
+                if (result == UNIDENTIFIED) {
+                    result = WHITESPACE;
+                } else if (result != WHITESPACE) {
+                    return MIXED;
+                }
+                break;
+            case PUNCTUATION:
+                if (result == UNIDENTIFIED) {
+                    result = PUNCTUATION;
+                } else if (result != PUNCTUATION) {
+                    return MIXED;
+                }
+                break;
+            default:
+                return MIXED;
+        }
+    }
+    return result;
+}
+
+// verify the content of a string
+bool CNCTUnicode::string_test(const std::string &str, CNCTCharType chartype)
+{
+    std::string::const_iterator it = str.begin();
+    while (it != str.end()) {
+        int len = utf8_len(*it);
+        std::string utf8_char;
+        for (int i = 0; i < len && it != str.end(); ++i, ++it) {
+            utf8_char += *it;
+        }
+        if (get_code_type(utf8_char) != chartype) {
+            return false;
+        }
+    }
+    return true;
+}
+
+//-----------------
+// llama.cpp GPT2 vocab (from libfalcon.cpp)
+//-----------------
+
+std::string replaceAll(std::string str, const std::string& from, const std::string& to) {
+    size_t start_pos = 0;
+    while((start_pos = str.find(from, start_pos)) != std::string::npos) {
+        str.replace(start_pos, from.length(), to);
+        start_pos += to.length(); // Handles case where 'to' is a substring of 'from'
+    }
+    return str;
+}
+
+struct TrieNode {
+    std::map<char, TrieNode*> map;
+    int32_t Id = -1;
+};
+
+struct Trie {
+    TrieNode *root;
+
+    Trie() : root(new TrieNode()) {}
+
+    ~Trie() {
+        if(root)
+            deleteTrie(root);
+    }
+
+    // Move constructor
+    Trie(Trie&& other) noexcept : root(other.root) {
+        other.root = nullptr;
+    }
+
+    // Move assignment operator
+    Trie& operator=(Trie&& other) noexcept {
+        if (this != &other) {
+            if(root)
+                deleteTrie(root);
+            root = other.root;
+            other.root = nullptr;
+        }
+        return *this;
+    }
+
+    void insert(const std::string &token, int32_t Id) {
+        TrieNode* current = root;
+        for(auto ch : token) {
+            if(current->map.find(ch) == current->map.end()) {
+                current->map[ch] = new TrieNode();
+            }
+            current = current->map[ch];
+        }
+        current->Id = Id;
+    }
+
+    void reset() {
+        if(root)
+            deleteTrie(root);
+        root = new TrieNode();
+    }
+
+private:
+    void deleteTrie(TrieNode* node) {
+        for(auto &it: node->map) {
+            deleteTrie(it.second);
+        }
+        delete node;
+    }
+
+};
+
+struct gpt2bpe_vocab {
+    using id    = int32_t;
+    using token = std::string;
+
+    std::map<std::string, uint32_t> max_token_length; // max length, for each 2byte prefix
+    std::map<std::pair<std::string, std::string>, int> bpe_ranks;
+    std::vector<std::pair<std::string, std::string>> bpe_merges;
+
+    id special_bos_id = -1;
+    id special_eos_id = -1;
+    id special_unk_id = -1;
+    id special_sep_id = -1;
+    id special_pad_id = -1;
+
+    id linefeed_id = -1;
+
+    std::unordered_map<token, id> token_to_id;
+    std::unordered_map<id, token> id_to_token;
+
+    Trie trie; // highspeed access to tokens by prefix tree
+
+    // populate trie from map
+    void populate_trie_from_map() {
+        trie.reset();
+        for (const auto& pair : token_to_id) {
+            trie.insert(pair.first, pair.second);
+            if (pair.first.size() >= 2) {
+                std::string prefix = pair.first.substr(0, 2);
+                max_token_length[prefix] = std::max(max_token_length[prefix], (uint32_t)pair.first.size());
+            }
+        }
+    }
+    // populate token ranks map
+    int populate_bpe_ranks(std::vector<std::pair<std::string, std::string>> bpe_merges_) {
+        for (int i = 0; i < (int)bpe_merges_.size(); i++) {
+            bpe_ranks.emplace(bpe_merges_[i], i);
+        }
+        bpe_merges = bpe_merges_;
+        return bpe_merges_.size();
+    }
+
+    // Trim whitespace characters from the beginning and end of the string
+    void trim(std::string& str) {
+        // Remove whitespace characters from the beginning of the string
+        str.erase(str.begin(), std::find_if(str.begin(), str.end(), [](int ch) {
+            return !std::isspace(ch);
+        }));
+
+        // Remove whitespace characters from the end of the string
+        str.erase(std::find_if(str.rbegin(), str.rend(), [](int ch) {
+            return !std::isspace(ch);
+        }).base(), str.end());
+    }
+
+    // get max token length available for a prefix of 2 bytes (string at least 2 bytes long)
+    int get_max_token_length(const std::string& string) const {
+        if (string.size() < 2) {
+            return -1;
+        }
+        std::string prefix = string.substr(0, 2);
+        if (max_token_length.find(prefix) == max_token_length.end()) {
+            return 0;
+        }
+        return max_token_length.at(prefix);
+    }
+
+    // find the rank of two tokens in bpe_ranks, return rank or -1
+    int find_bpe_rank(const std::string& token1, const std::string& token2) const {
+        std::string left_token  = token1;
+        std::string right_token = token2;
+        left_token  = replaceAll(left_token,  " ",  "Ġ");
+        left_token  = replaceAll(left_token,  "\n", "Ċ");
+        right_token = replaceAll(right_token, " ",  "Ġ");
+        right_token = replaceAll(right_token, "\n", "Ċ");
+
+        auto it = bpe_ranks.find(std::make_pair(left_token, right_token));
+        if (it == bpe_ranks.end()) {
+            return -1;
+        }
+        return it->second;
+    }
+
+    std::pair<gpt2bpe_vocab::id, std::string> find_longest_match(const std::string& snippet) const {
+        TrieNode* current = trie.root;
+        gpt2bpe_vocab::id last_matched_id = -1;
+        std::string last_matched_token = "";
+        std::string current_token = "";
+        for (auto ch : snippet) {
+            if (current->map.find(ch) == current->map.end()) {
+                break;
+            }
+            current = current->map[ch];
+            current_token += ch;
+            if (current->Id != -1) {
+                last_matched_id = current->Id;
+                last_matched_token = current_token;
+            }
+        }
+        return {last_matched_id, last_matched_token};
+    }
+
+};
+
+
+//
+// tokenizer - bpe type, gpt2 tokenization compatible
+//
+
+struct ggllm_bpe_symbol {
+    using index = int;
+    index prev;
+    index next;
+    const char * text;
+    size_t n;
+};
+
+static_assert(std::is_trivially_copyable<ggllm_bpe_symbol>::value, "ggllm_bpe_symbol is not trivially copyable");
+
+struct ggllm_bpe_bigram {
+    struct comparator {
+        bool operator()(ggllm_bpe_bigram & l, ggllm_bpe_bigram & r) {
+            return l.rank > r.rank || (l.rank == r.rank && l.left > r.left);
+        }
+    };
+
+    using queue_storage = std::vector<ggllm_bpe_bigram>;
+    using queue = std::priority_queue<ggllm_bpe_bigram, queue_storage, comparator>;
+    ggllm_bpe_symbol::index left;
+    ggllm_bpe_symbol::index right;
+    std::string text;
+    int rank;
+    size_t size;
+};
+
+struct gpt2bpe_tokenizer {
+    gpt2bpe_tokenizer(const gpt2bpe_vocab & vocab, bool g2ws_): vocab_(vocab) { flag_g2ws = g2ws_; }
+
+    void tokenize(const std::string & text, std::vector<gpt2bpe_vocab::id> & output) {
+        int final_prev_index = -1;
+        // auto start = ggml_time_us();
+        auto word_collection = bpe_gpt2_preprocess(text);
+        // auto end = ggml_time_us();
+        // fprintf(stderr, "%s: preprocessing took %0.3f ms\n", __func__, (end - start) / 1000.0);
+
+        symbols_final.clear();
+
+        for (auto & word : word_collection) {
+            work_queue_ = ggllm_bpe_bigram::queue();
+            symbols_.clear();
+
+            int index = 0;
+            size_t offset = 0;
+
+            while (offset < word.size()) {
+                ggllm_bpe_symbol sym;
+                size_t char_len = std::min(word.size() - offset, (size_t) CNCTUnicode::utf8_len(word[offset]));
+                sym.text = word.c_str() + offset;
+                sym.n = char_len;
+                offset += sym.n;
+                sym.prev = index - 1;
+                sym.next = offset == word.size() ?
-1 : index + 1;
+                index++;
+                symbols_.emplace_back(sym);
+            }
+            for (size_t i = 1; i < symbols_.size(); ++i) {
+                add_new_bigram(i - 1, i);
+            }
+
+            // build token(s)
+            while (!work_queue_.empty()) {
+                auto bigram = work_queue_.top();
+                work_queue_.pop();
+
+                auto & left_symbol = symbols_[bigram.left];
+                auto & right_symbol = symbols_[bigram.right];
+
+                if (left_symbol.n == 0 || right_symbol.n == 0) {
+                    continue;
+                }
+                std::string left_token = std::string(left_symbol.text, left_symbol.n);
+                std::string right_token = std::string(right_symbol.text, right_symbol.n);
+                if (left_token + right_token != bigram.text) {
+                    continue; // Skip this bigram if it's outdated
+                }
+
+                // merge the right sym into the left one
+                left_symbol.n += right_symbol.n;
+                right_symbol.n = 0;
+
+                // remove the right sym from the chain
+                left_symbol.next = right_symbol.next;
+                if (right_symbol.next >= 0) {
+                    symbols_[right_symbol.next].prev = bigram.left;
+                }
+
+                add_new_bigram(left_symbol.prev, bigram.left); // left side of current symbol
+                add_new_bigram(bigram.left, left_symbol.next); // right side of current symbol
+            }
+
+            // add the finished tokens to the final list, keeping the correct order for next and prev
+            for (auto & sym : symbols_) {
+                if (sym.n > 0) {
+                    sym.prev = final_prev_index;
+                    sym.next = -1;
+                    if (final_prev_index != -1) {
+                        symbols_final[final_prev_index].next = symbols_final.size();
+                    }
+                    symbols_final.emplace_back(sym);
+                    final_prev_index = symbols_final.size() - 1;
+                }
+            }
+        }
+
+        symbols_ = symbols_final;
+        if (symbols_.size())
+            for (int i = 0; i != -1; i = symbols_[i].next) {
+                auto & symbol = symbols_[i];
+                if (symbol.n == 0) {
+                    continue;
+                }
+                std::string str = std::string(symbol.text, symbol.n);
+                std::string str_decoded = decode_token(str);
+                auto token = vocab_.token_to_id.find(str_decoded);
+
+                if (token == vocab_.token_to_id.end()) {
+                    for (auto j = str_decoded.begin(); j != str_decoded.end(); ++j) {
+                        std::string byte_str(1, *j);
+                        auto token_multibyte = vocab_.token_to_id.find(byte_str);
+                        if (token_multibyte == vocab_.token_to_id.end()) {
+                            fprintf(stderr, "ERROR: byte not found in vocab: '%s'\n", byte_str.c_str());
+                            continue; // skip unknown bytes instead of dereferencing the end() iterator
+                        }
+                        output.push_back((*token_multibyte).second);
+                    }
+                } else {
+                    output.push_back((*token).second);
+                }
+            }
+    }
+
+private:
+    void add_new_bigram(int left, int right) {
+        if (left == -1 || right == -1) return;
+
+        std::string left_token = std::string(symbols_[left].text, symbols_[left].n);
+        std::string right_token = std::string(symbols_[right].text, symbols_[right].n);
+
+        int rank_found = -1;
+        rank_found = vocab_.find_bpe_rank(left_token, right_token);
+
+        if (rank_found < 0) {
+            return;
+        }
+
+        ggllm_bpe_bigram bigram;
+        bigram.left  = left;
+        bigram.right = right;
+        bigram.rank  = rank_found;
+        bigram.size  = left_token.size() + right_token.size();
+        bigram.text  = left_token + right_token;
+        work_queue_.push(bigram);
+    }
+
+    std::unordered_map<unsigned char, std::string> bytes_to_unicode() {
+        static std::unordered_map<unsigned char, std::string> hex_map = {
+            { 0x21, "\x21" }, { 0x22, "\x22" }, { 0x23, "\x23" }, { 0x24, "\x24" }, { 0x25, "\x25" }, { 0x26, "\x26" }, { 0x27, "\x27" }, { 0x28, "\x28" }, { 0x29, "\x29" }, { 0x2A, "\x2A" },
+            { 0x2B, "\x2B" }, { 0x2C, "\x2C" }, { 0x2D, "\x2D" }, { 0x2E, "\x2E" }, { 0x2F, "\x2F" }, { 0x30, "\x30" }, { 0x31, "\x31" }, { 0x32, "\x32" }, { 0x33, "\x33" }, { 0x34, "\x34" },
+            { 0x35, "\x35" }, { 0x36, "\x36" }, { 0x37, "\x37" }, { 0x38, "\x38" }, { 0x39, "\x39" }, { 0x3A, "\x3A" }, { 0x3B, "\x3B" }, { 0x3C, "\x3C" }, { 0x3D, "\x3D" }, { 0x3E, "\x3E" },
+            { 0x3F, "\x3F" }, { 0x40, "\x40"
}, { 0x41, "\x41" }, { 0x42, "\x42" }, { 0x43, "\x43" }, { 0x44, "\x44" }, { 0x45, "\x45" }, { 0x46, "\x46" }, { 0x47, "\x47" }, { 0x48, "\x48" }, + { 0x49, "\x49" }, { 0x4A, "\x4A" }, { 0x4B, "\x4B" }, { 0x4C, "\x4C" }, { 0x4D, "\x4D" }, { 0x4E, "\x4E" }, { 0x4F, "\x4F" }, { 0x50, "\x50" }, { 0x51, "\x51" }, { 0x52, "\x52" }, + { 0x53, "\x53" }, { 0x54, "\x54" }, { 0x55, "\x55" }, { 0x56, "\x56" }, { 0x57, "\x57" }, { 0x58, "\x58" }, { 0x59, "\x59" }, { 0x5A, "\x5A" }, { 0x5B, "\x5B" }, { 0x5C, "\x5C" }, + { 0x5D, "\x5D" }, { 0x5E, "\x5E" }, { 0x5F, "\x5F" }, { 0x60, "\x60" }, { 0x61, "\x61" }, { 0x62, "\x62" }, { 0x63, "\x63" }, { 0x64, "\x64" }, { 0x65, "\x65" }, { 0x66, "\x66" }, + { 0x67, "\x67" }, { 0x68, "\x68" }, { 0x69, "\x69" }, { 0x6A, "\x6A" }, { 0x6B, "\x6B" }, { 0x6C, "\x6C" }, { 0x6D, "\x6D" }, { 0x6E, "\x6E" }, { 0x6F, "\x6F" }, { 0x70, "\x70" }, + { 0x71, "\x71" }, { 0x72, "\x72" }, { 0x73, "\x73" }, { 0x74, "\x74" }, { 0x75, "\x75" }, { 0x76, "\x76" }, { 0x77, "\x77" }, { 0x78, "\x78" }, { 0x79, "\x79" }, { 0x7A, "\x7A" }, + { 0x7B, "\x7B" }, { 0x7C, "\x7C" }, { 0x7D, "\x7D" }, { 0x7E, "\x7E" }, { 0xA1, "\xC2\xA1" }, { 0xA2, "\xC2\xA2" }, { 0xA3, "\xC2\xA3" }, { 0xA4, "\xC2\xA4" }, { 0xA5, "\xC2\xA5" }, + { 0xA6, "\xC2\xA6" }, { 0xA7, "\xC2\xA7" }, { 0xA8, "\xC2\xA8" }, { 0xA9, "\xC2\xA9" }, { 0xAA, "\xC2\xAA" }, { 0xAB, "\xC2\xAB" }, { 0xAC, "\xC2\xAC" }, { 0xAE, "\xC2\xAE" }, + { 0xAF, "\xC2\xAF" }, { 0xB0, "\xC2\xB0" }, { 0xB1, "\xC2\xB1" }, { 0xB2, "\xC2\xB2" }, { 0xB3, "\xC2\xB3" }, { 0xB4, "\xC2\xB4" }, { 0xB5, "\xC2\xB5" }, { 0xB6, "\xC2\xB6" }, + { 0xB7, "\xC2\xB7" }, { 0xB8, "\xC2\xB8" }, { 0xB9, "\xC2\xB9" }, { 0xBA, "\xC2\xBA" }, { 0xBB, "\xC2\xBB" }, { 0xBC, "\xC2\xBC" }, { 0xBD, "\xC2\xBD" }, { 0xBE, "\xC2\xBE" }, + { 0xBF, "\xC2\xBF" }, { 0xC0, "\xC3\x80" }, { 0xC1, "\xC3\x81" }, { 0xC2, "\xC3\x82" }, { 0xC3, "\xC3\x83" }, { 0xC4, "\xC3\x84" }, { 0xC5, "\xC3\x85" }, { 0xC6, "\xC3\x86" }, + { 0xC7, "\xC3\x87" }, { 0xC8, "\xC3\x88" }, { 0xC9, "\xC3\x89" }, { 0xCA, "\xC3\x8A" }, { 0xCB, "\xC3\x8B" }, { 0xCC, "\xC3\x8C" }, { 0xCD, "\xC3\x8D" }, { 0xCE, "\xC3\x8E" }, + { 0xCF, "\xC3\x8F" }, { 0xD0, "\xC3\x90" }, { 0xD1, "\xC3\x91" }, { 0xD2, "\xC3\x92" }, { 0xD3, "\xC3\x93" }, { 0xD4, "\xC3\x94" }, { 0xD5, "\xC3\x95" }, { 0xD6, "\xC3\x96" }, + { 0xD7, "\xC3\x97" }, { 0xD8, "\xC3\x98" }, { 0xD9, "\xC3\x99" }, { 0xDA, "\xC3\x9A" }, { 0xDB, "\xC3\x9B" }, { 0xDC, "\xC3\x9C" }, { 0xDD, "\xC3\x9D" }, { 0xDE, "\xC3\x9E" }, + { 0xDF, "\xC3\x9F" }, { 0xE0, "\xC3\xA0" }, { 0xE1, "\xC3\xA1" }, { 0xE2, "\xC3\xA2" }, { 0xE3, "\xC3\xA3" }, { 0xE4, "\xC3\xA4" }, { 0xE5, "\xC3\xA5" }, { 0xE6, "\xC3\xA6" }, + { 0xE7, "\xC3\xA7" }, { 0xE8, "\xC3\xA8" }, { 0xE9, "\xC3\xA9" }, { 0xEA, "\xC3\xAA" }, { 0xEB, "\xC3\xAB" }, { 0xEC, "\xC3\xAC" }, { 0xED, "\xC3\xAD" }, { 0xEE, "\xC3\xAE" }, + { 0xEF, "\xC3\xAF" }, { 0xF0, "\xC3\xB0" }, { 0xF1, "\xC3\xB1" }, { 0xF2, "\xC3\xB2" }, { 0xF3, "\xC3\xB3" }, { 0xF4, "\xC3\xB4" }, { 0xF5, "\xC3\xB5" }, { 0xF6, "\xC3\xB6" }, + { 0xF7, "\xC3\xB7" }, { 0xF8, "\xC3\xB8" }, { 0xF9, "\xC3\xB9" }, { 0xFA, "\xC3\xBA" }, { 0xFB, "\xC3\xBB" }, { 0xFC, "\xC3\xBC" }, { 0xFD, "\xC3\xBD" }, { 0xFE, "\xC3\xBE" }, + { 0xFF, "\xC3\xBF" }, { 0x00, "\xC4\x80" }, { 0x01, "\xC4\x81" }, { 0x02, "\xC4\x82" }, { 0x03, "\xC4\x83" }, { 0x04, "\xC4\x84" }, { 0x05, "\xC4\x85" }, { 0x06, "\xC4\x86" }, + { 0x07, "\xC4\x87" }, { 0x08, "\xC4\x88" }, { 0x09, "\xC4\x89" }, { 0x0A, "\xC4\x8A" }, { 0x0B, "\xC4\x8B" }, { 0x0C, "\xC4\x8C" }, { 0x0D, "\xC4\x8D" }, { 0x0E, "\xC4\x8E" }, 
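+            // GPT-2 byte encoder: printable ASCII maps to itself, every other byte value is remapped
+            // to a printable code point, e.g. 0x20 (space) -> U+0120 "Ġ" (\xC4\xA0) and 0x0A (\n) -> U+010A "Ċ" (\xC4\x8A),
+            // so merge rules and vocab entries can be stored as plain text.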
+ { 0x0F, "\xC4\x8F" }, { 0x10, "\xC4\x90" }, { 0x11, "\xC4\x91" }, { 0x12, "\xC4\x92" }, { 0x13, "\xC4\x93" }, { 0x14, "\xC4\x94" }, { 0x15, "\xC4\x95" }, { 0x16, "\xC4\x96" }, + { 0x17, "\xC4\x97" }, { 0x18, "\xC4\x98" }, { 0x19, "\xC4\x99" }, { 0x1A, "\xC4\x9A" }, { 0x1B, "\xC4\x9B" }, { 0x1C, "\xC4\x9C" }, { 0x1D, "\xC4\x9D" }, { 0x1E, "\xC4\x9E" }, + { 0x1F, "\xC4\x9F" }, { 0x20, "\xC4\xA0" }, { 0x7F, "\xC4\xA1" }, { 0x80, "\xC4\xA2" }, { 0x81, "\xC4\xA3" }, { 0x82, "\xC4\xA4" }, { 0x83, "\xC4\xA5" }, { 0x84, "\xC4\xA6" }, + { 0x85, "\xC4\xA7" }, { 0x86, "\xC4\xA8" }, { 0x87, "\xC4\xA9" }, { 0x88, "\xC4\xAA" }, { 0x89, "\xC4\xAB" }, { 0x8A, "\xC4\xAC" }, { 0x8B, "\xC4\xAD" }, { 0x8C, "\xC4\xAE" }, + { 0x8D, "\xC4\xAF" }, { 0x8E, "\xC4\xB0" }, { 0x8F, "\xC4\xB1" }, { 0x90, "\xC4\xB2" }, { 0x91, "\xC4\xB3" }, { 0x92, "\xC4\xB4" }, { 0x93, "\xC4\xB5" }, { 0x94, "\xC4\xB6" }, + { 0x95, "\xC4\xB7" }, { 0x96, "\xC4\xB8" }, { 0x97, "\xC4\xB9" }, { 0x98, "\xC4\xBA" }, { 0x99, "\xC4\xBB" }, { 0x9A, "\xC4\xBC" }, { 0x9B, "\xC4\xBD" }, { 0x9C, "\xC4\xBE" }, + { 0x9D, "\xC4\xBF" }, { 0x9E, "\xC5\x80" }, { 0x9F, "\xC5\x81" }, { 0xA0, "\xC5\x82" }, { 0xAD, "\xC5\x83" } + }; + return hex_map; + } + + std::unordered_map unicode_to_bytes() { + static std::unordered_map hex_map = { + { "\x21", 0x21 }, { "\x22", 0x22 }, { "\x23", 0x23 }, { "\x24", 0x24 }, { "\x25", 0x25 }, { "\x26", 0x26 }, { "\x27", 0x27 }, { "\x28", 0x28 }, { "\x29", 0x29 }, { "\x2A", 0x2A }, + { "\x2B", 0x2B }, { "\x2C", 0x2C }, { "\x2D", 0x2D }, { "\x2E", 0x2E }, { "\x2F", 0x2F }, { "\x30", 0x30 }, { "\x31", 0x31 }, { "\x32", 0x32 }, { "\x33", 0x33 }, { "\x34", 0x34 }, + { "\x35", 0x35 }, { "\x36", 0x36 }, { "\x37", 0x37 }, { "\x38", 0x38 }, { "\x39", 0x39 }, { "\x3A", 0x3A }, { "\x3B", 0x3B }, { "\x3C", 0x3C }, { "\x3D", 0x3D }, { "\x3E", 0x3E }, + { "\x3F", 0x3F }, { "\x40", 0x40 }, { "\x41", 0x41 }, { "\x42", 0x42 }, { "\x43", 0x43 }, { "\x44", 0x44 }, { "\x45", 0x45 }, { "\x46", 0x46 }, { "\x47", 0x47 }, { "\x48", 0x48 }, + { "\x49", 0x49 }, { "\x4A", 0x4A }, { "\x4B", 0x4B }, { "\x4C", 0x4C }, { "\x4D", 0x4D }, { "\x4E", 0x4E }, { "\x4F", 0x4F }, { "\x50", 0x50 }, { "\x51", 0x51 }, { "\x52", 0x52 }, + { "\x53", 0x53 }, { "\x54", 0x54 }, { "\x55", 0x55 }, { "\x56", 0x56 }, { "\x57", 0x57 }, { "\x58", 0x58 }, { "\x59", 0x59 }, { "\x5A", 0x5A }, { "\x5B", 0x5B }, { "\x5C", 0x5C }, + { "\x5D", 0x5D }, { "\x5E", 0x5E }, { "\x5F", 0x5F }, { "\x60", 0x60 }, { "\x61", 0x61 }, { "\x62", 0x62 }, { "\x63", 0x63 }, { "\x64", 0x64 }, { "\x65", 0x65 }, { "\x66", 0x66 }, + { "\x67", 0x67 }, { "\x68", 0x68 }, { "\x69", 0x69 }, { "\x6A", 0x6A }, { "\x6B", 0x6B }, { "\x6C", 0x6C }, { "\x6D", 0x6D }, { "\x6E", 0x6E }, { "\x6F", 0x6F }, { "\x70", 0x70 }, + { "\x71", 0x71 }, { "\x72", 0x72 }, { "\x73", 0x73 }, { "\x74", 0x74 }, { "\x75", 0x75 }, { "\x76", 0x76 }, { "\x77", 0x77 }, { "\x78", 0x78 }, { "\x79", 0x79 }, { "\x7A", 0x7A }, + { "\x7B", 0x7B }, { "\x7C", 0x7C }, { "\x7D", 0x7D }, { "\x7E", 0x7E }, { "\xC2\xA1", 0xA1 }, { "\xC2\xA2", 0xA2 }, { "\xC2\xA3", 0xA3 }, { "\xC2\xA4", 0xA4 }, { "\xC2\xA5", 0xA5 }, + { "\xC2\xA6", 0xA6 }, { "\xC2\xA7", 0xA7 }, { "\xC2\xA8", 0xA8 }, { "\xC2\xA9", 0xA9 }, { "\xC2\xAA", 0xAA }, { "\xC2\xAB", 0xAB }, { "\xC2\xAC", 0xAC }, { "\xC2\xAE", 0xAE }, + { "\xC2\xAF", 0xAF }, { "\xC2\xB0", 0xB0 }, { "\xC2\xB1", 0xB1 }, { "\xC2\xB2", 0xB2 }, { "\xC2\xB3", 0xB3 }, { "\xC2\xB4", 0xB4 }, { "\xC2\xB5", 0xB5 }, { "\xC2\xB6", 0xB6 }, + { "\xC2\xB7", 0xB7 }, { "\xC2\xB8", 0xB8 }, { "\xC2\xB9", 0xB9 }, { "\xC2\xBA", 0xBA 
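+            // exact inverse of bytes_to_unicode(): decode_token() below maps each printable
+            // UTF-8 sequence back to its raw byte so the result can be looked up in token_to_id.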
}, { "\xC2\xBB", 0xBB }, { "\xC2\xBC", 0xBC }, { "\xC2\xBD", 0xBD }, { "\xC2\xBE", 0xBE }, + { "\xC2\xBF", 0xBF }, { "\xC3\x80", 0xC0 }, { "\xC3\x81", 0xC1 }, { "\xC3\x82", 0xC2 }, { "\xC3\x83", 0xC3 }, { "\xC3\x84", 0xC4 }, { "\xC3\x85", 0xC5 }, { "\xC3\x86", 0xC6 }, + { "\xC3\x87", 0xC7 }, { "\xC3\x88", 0xC8 }, { "\xC3\x89", 0xC9 }, { "\xC3\x8A", 0xCA }, { "\xC3\x8B", 0xCB }, { "\xC3\x8C", 0xCC }, { "\xC3\x8D", 0xCD }, { "\xC3\x8E", 0xCE }, + { "\xC3\x8F", 0xCF }, { "\xC3\x90", 0xD0 }, { "\xC3\x91", 0xD1 }, { "\xC3\x92", 0xD2 }, { "\xC3\x93", 0xD3 }, { "\xC3\x94", 0xD4 }, { "\xC3\x95", 0xD5 }, { "\xC3\x96", 0xD6 }, + { "\xC3\x97", 0xD7 }, { "\xC3\x98", 0xD8 }, { "\xC3\x99", 0xD9 }, { "\xC3\x9A", 0xDA }, { "\xC3\x9B", 0xDB }, { "\xC3\x9C", 0xDC }, { "\xC3\x9D", 0xDD }, { "\xC3\x9E", 0xDE }, + { "\xC3\x9F", 0xDF }, { "\xC3\xA0", 0xE0 }, { "\xC3\xA1", 0xE1 }, { "\xC3\xA2", 0xE2 }, { "\xC3\xA3", 0xE3 }, { "\xC3\xA4", 0xE4 }, { "\xC3\xA5", 0xE5 }, { "\xC3\xA6", 0xE6 }, + { "\xC3\xA7", 0xE7 }, { "\xC3\xA8", 0xE8 }, { "\xC3\xA9", 0xE9 }, { "\xC3\xAA", 0xEA }, { "\xC3\xAB", 0xEB }, { "\xC3\xAC", 0xEC }, { "\xC3\xAD", 0xED }, { "\xC3\xAE", 0xEE }, + { "\xC3\xAF", 0xEF }, { "\xC3\xB0", 0xF0 }, { "\xC3\xB1", 0xF1 }, { "\xC3\xB2", 0xF2 }, { "\xC3\xB3", 0xF3 }, { "\xC3\xB4", 0xF4 }, { "\xC3\xB5", 0xF5 }, { "\xC3\xB6", 0xF6 }, + { "\xC3\xB7", 0xF7 }, { "\xC3\xB8", 0xF8 }, { "\xC3\xB9", 0xF9 }, { "\xC3\xBA", 0xFA }, { "\xC3\xBB", 0xFB }, { "\xC3\xBC", 0xFC }, { "\xC3\xBD", 0xFD }, { "\xC3\xBE", 0xFE }, + { "\xC3\xBF", 0xFF }, { "\xC4\x80", 0x00 }, { "\xC4\x81", 0x01 }, { "\xC4\x82", 0x02 }, { "\xC4\x83", 0x03 }, { "\xC4\x84", 0x04 }, { "\xC4\x85", 0x05 }, { "\xC4\x86", 0x06 }, + { "\xC4\x87", 0x07 }, { "\xC4\x88", 0x08 }, { "\xC4\x89", 0x09 }, { "\xC4\x8A", 0x0A }, { "\xC4\x8B", 0x0B }, { "\xC4\x8C", 0x0C }, { "\xC4\x8D", 0x0D }, { "\xC4\x8E", 0x0E }, + { "\xC4\x8F", 0x0F }, { "\xC4\x90", 0x10 }, { "\xC4\x91", 0x11 }, { "\xC4\x92", 0x12 }, { "\xC4\x93", 0x13 }, { "\xC4\x94", 0x14 }, { "\xC4\x95", 0x15 }, { "\xC4\x96", 0x16 }, + { "\xC4\x97", 0x17 }, { "\xC4\x98", 0x18 }, { "\xC4\x99", 0x19 }, { "\xC4\x9A", 0x1A }, { "\xC4\x9B", 0x1B }, { "\xC4\x9C", 0x1C }, { "\xC4\x9D", 0x1D }, { "\xC4\x9E", 0x1E }, + { "\xC4\x9F", 0x1F }, { "\xC4\xA0", 0x20 }, { "\xC4\xA1", 0x7F }, { "\xC4\xA2", 0x80 }, { "\xC4\xA3", 0x81 }, { "\xC4\xA4", 0x82 }, { "\xC4\xA5", 0x83 }, { "\xC4\xA6", 0x84 }, + { "\xC4\xA7", 0x85 }, { "\xC4\xA8", 0x86 }, { "\xC4\xA9", 0x87 }, { "\xC4\xAA", 0x88 }, { "\xC4\xAB", 0x89 }, { "\xC4\xAC", 0x8A }, { "\xC4\xAD", 0x8B }, { "\xC4\xAE", 0x8C }, + { "\xC4\xAF", 0x8D }, { "\xC4\xB0", 0x8E }, { "\xC4\xB1", 0x8F }, { "\xC4\xB2", 0x90 }, { "\xC4\xB3", 0x91 }, { "\xC4\xB4", 0x92 }, { "\xC4\xB5", 0x93 }, { "\xC4\xB6", 0x94 }, + { "\xC4\xB7", 0x95 }, { "\xC4\xB8", 0x96 }, { "\xC4\xB9", 0x97 }, { "\xC4\xBA", 0x98 }, { "\xC4\xBB", 0x99 }, { "\xC4\xBC", 0x9A }, { "\xC4\xBD", 0x9B }, { "\xC4\xBE", 0x9C }, + { "\xC4\xBF", 0x9D }, { "\xC5\x80", 0x9E }, { "\xC5\x81", 0x9F }, { "\xC5\x82", 0xA0 }, { "\xC5\x83", 0xAD } + }; + return hex_map; + } + + // len must be available + bool inline str_is_equal(const char* str1, const char* str2, size_t len) { + for (size_t i = 0; i < len; ++i) { + if (str1[i] != str2[i]) { + return false; + } + } + return true; + } + + std::vector bpe_gpt2_preprocess(const std::string& text) { + static std::unordered_map< unsigned char, std::string> byte_encoder = bytes_to_unicode(); + std::vector bpe_words; + std::vector bpe_encoded_words; + + std::string token=""; + const char *raw_text_p 
= text.c_str();
+        // GPT2 system regex:  's|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+
+        bool collecting_numeric = false;
+        bool collecting_letter = false;
+        bool collecting_special = false;
+        bool collecting_whitespace_lookahead = false;
+        bool collecting = false;
+
+        std::vector<CNCTString> text_utf;
+        text_utf.reserve(text.size());
+        bpe_words.reserve(text.size());
+        bpe_encoded_words.reserve(text.size());
+
+        text_utf = CNCTUnicode::split_utf8_enhanced(text);
+
+        for (int i = 0; i < (int)text_utf.size(); i++) {
+            const CNCTString &utf_char = text_utf[i];
+            bool split_condition = false;
+            const char *text_pos = raw_text_p + utf_char.seq_offset_bytes;
+            int bytes_remain = strlen(text_pos);
+            // forward backward lookups
+            const CNCTString &utf_char_next = (i+1 < (int)text_utf.size()) ? text_utf[i+1] : CNCTString();
+            const CNCTString &utf_char_next_next = (i+2 < (int)text_utf.size()) ? text_utf[i+2] : CNCTString();
+            // const CNCTString &utf_char_prev = (i > 0) ? text_utf[i-1] : CNCTString();
+
+            // handling contractions
+            if (!split_condition && bytes_remain >= 2) {
+                // 's|'t|'m|'d
+                if (utf_char == '\'' && (utf_char_next == 's' || utf_char_next == 't' || utf_char_next == 'm' || utf_char_next == 'd')) {
+                    split_condition = true;
+                }
+                if (split_condition) {
+                    if (token.size()) {
+                        bpe_words.emplace_back(token); // push previous content as token
+                    }
+                    token = utf_char.str + utf_char_next.str;
+                    bpe_words.emplace_back(token);
+                    token = "";
+                    i++;
+                    continue;
+                }
+            }
+            if (!split_condition && bytes_remain >= 3) {
+                // 're|'ve|'ll -- both following characters have to match
+                if (utf_char == '\'' && (
+                    (utf_char_next == 'r' && utf_char_next_next == 'e') ||
+                    (utf_char_next == 'v' && utf_char_next_next == 'e') ||
+                    (utf_char_next == 'l' && utf_char_next_next == 'l'))
+                    ) {
+                    split_condition = true;
+                }
+                if (split_condition) {
+                    // current token + next token can be defined
+                    if (token.size()) {
+                        bpe_words.emplace_back(token); // push previous content as token
+                    }
+                    token = utf_char.str + utf_char_next.str + utf_char_next_next.str;
+                    bpe_words.emplace_back(token); // the contraction
+                    token = "";
+                    i += 2;
+                    continue;
+                }
+            }
+
+            if (!split_condition && !collecting) {
+                if (utf_char.char_type == CNCTCharType::LETTER || (!token.size() && utf_char == " " && utf_char_next.char_type == CNCTCharType::LETTER)) {
+                    collecting_letter = true;
+                    collecting = true;
+                } else if (utf_char.char_type == CNCTCharType::DIGIT || (!token.size() && utf_char == " " && utf_char_next.char_type == CNCTCharType::DIGIT)) {
+                    collecting_numeric = true;
+                    collecting = true;
+                } else if (
+                    ((utf_char.char_type != CNCTCharType::LETTER && utf_char.char_type != CNCTCharType::DIGIT) && (utf_char.char_type != CNCTCharType::WHITESPACE)) ||
+                    (!token.size() && utf_char == " " && utf_char_next.char_type != CNCTCharType::LETTER && utf_char_next.char_type != CNCTCharType::DIGIT && utf_char_next.char_type != CNCTCharType::WHITESPACE)
+                    ) {
+                    collecting_special = true;
+                    collecting = true;
+                } else if (utf_char.char_type == CNCTCharType::WHITESPACE && utf_char_next.char_type == CNCTCharType::WHITESPACE) {
+                    collecting_whitespace_lookahead = true;
+                    collecting = true;
+                } else if (utf_char.char_type == CNCTCharType::WHITESPACE) {
+                    split_condition = true;
+                }
+            } else if (!split_condition && collecting) {
+                if (collecting_letter && utf_char.char_type != CNCTCharType::LETTER) {
+                    split_condition = true;
+                } else if (collecting_numeric && utf_char.char_type != CNCTCharType::DIGIT) {
+                    split_condition = true;
+                } else if (collecting_special && (utf_char.char_type ==
CNCTCharType::LETTER || utf_char.char_type == CNCTCharType::DIGIT || utf_char.char_type == CNCTCharType::WHITESPACE)) {
+                    split_condition = true;
+                } else if (collecting_whitespace_lookahead && utf_char_next.char_type != CNCTCharType::WHITESPACE) {
+                    split_condition = true;
+                }
+            }
+
+            if (utf_char_next.str.size() == 0) {
+                // final character: flush everything, but do not glue it onto a chunk that the
+                // rules above already decided to split off (a trailing whitespace run is the
+                // exception, since \s+ may absorb it)
+                if (split_condition && utf_char.char_type == CNCTCharType::WHITESPACE && token.size() && CNCTUnicode::string_test(token, CNCTCharType::WHITESPACE)) {
+                    token += utf_char.str;
+                    bpe_words.emplace_back(token);
+                } else if (split_condition) {
+                    if (token.size()) {
+                        bpe_words.emplace_back(token);
+                    }
+                    bpe_words.emplace_back(utf_char.str);
+                } else {
+                    token += utf_char.str;
+                    bpe_words.emplace_back(token);
+                }
+                token = "";
+                continue;
+            }
+
+            if (split_condition) {
+                if (token.size()) {
+                    bpe_words.emplace_back(token);
+                }
+                token = utf_char.str;
+                collecting = false;
+                collecting_letter = false;
+                collecting_numeric = false;
+                collecting_special = false;
+                collecting_whitespace_lookahead = false;
+            } else {
+                token += utf_char.str;
+            }
+        }
+
+        for (std::string& word : bpe_words) {
+            std::string encoded_token = "";
+            for (char& c : word) {
+                encoded_token += byte_encoder[c];
+            }
+            bpe_encoded_words.emplace_back(encoded_token);
+        }
+
+        return bpe_encoded_words;
+    }
+
+    // decoder (for one token)
+    std::string decode_token(const std::string& token) {
+        static std::unordered_map< std::string, unsigned char> byte_decoder = unicode_to_bytes();
+        std::string decoded_token = "";
+        auto unicode_sequences = CNCTUnicode::split_utf8(token);
+        for (auto& unicode_sequence : unicode_sequences) {
+            decoded_token += byte_decoder[unicode_sequence];
+        }
+
+        return decoded_token;
+    }
+
+    const gpt2bpe_vocab & vocab_;
+    std::vector<ggllm_bpe_symbol> symbols_;
+    std::vector<ggllm_bpe_symbol> symbols_final;
+    ggllm_bpe_bigram::queue work_queue_;
+    bool flag_g2ws = false;
+};
+
+static std::vector<gpt2bpe_vocab::id> gpt2bpe_tokenize(const gpt2bpe_vocab & vocab, const std::string & text, bool bos, bool g2ws ) {
+    gpt2bpe_tokenizer tokenizer(vocab, g2ws);
+    std::vector<gpt2bpe_vocab::id> output;
+
+    if (text.empty()) {
+        return output;
+    }
+
+    if (bos && vocab.special_bos_id != -1) {
+        output.push_back(vocab.special_bos_id);
+    }
+
+    tokenizer.tokenize(text, output);
+    return output;
+}
+
+#endif // CMPNCT_GPT2BPE
diff --git a/examples/gptneox-wip/falcon-main.cpp b/examples/gptneox-wip/falcon-main.cpp
new file mode 100644
index 000000000..43b6a29f3
--- /dev/null
+++ b/examples/gptneox-wip/falcon-main.cpp
@@ -0,0 +1,1111 @@
+#include "ggml.h"
+#include "cmpnct_gpt2bpe.hpp"
+
+#include <cinttypes>
+#include <cmath>
+#include <cstdio>
+#include <cstring>
+#include <fstream>
+#include <map>
+#include <string>
+#include <vector>
+#include <algorithm>
+#include <thread>
+#include <random>
+
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
+// default hparams
+struct falcon_hparams {
+    size_t n_merges = 0;
+    size_t n_vocab  = 0;
+    uint32_t n_ctx     = 0;
+    uint32_t n_embd    = 0;
+    uint32_t n_head    = 0;
+    uint32_t n_head_kv = 1; // Needs to be 1 for 7B model
+    uint32_t n_ff      = 0;
+    uint32_t n_block   = 0;
+    float norm_eps     = 1e-5f;
+};
+struct falcon_block {
+    // normalization
+    struct ggml_tensor* input_layernorm;
+    struct ggml_tensor* input_layernorm_b;
+    struct ggml_tensor* attention_norm;   // Falcon-40B only
+    struct ggml_tensor* attention_norm_b; // Falcon-40B only
+
+    // attention
+    struct ggml_tensor* query_key_value;
+    struct ggml_tensor* wo;
+
+    // ff
+    struct ggml_tensor* ffn_up;
+    struct ggml_tensor* ffn_down;
+};
+
+struct falcon_model {
+    falcon_hparams hparams;
+
+    struct ggml_tensor* tok_embeddings;
+    struct ggml_tensor* output_norm;
+    struct ggml_tensor* output_norm_b;
+    struct ggml_tensor* lm_head;
+
+    std::vector<falcon_block> blocks;
+
+    // key + value memory
+    struct ggml_tensor* memory_k;
+    struct ggml_tensor* memory_v;
+
+    struct gguf_context * ggufctx;
+    struct ggml_context * ctx;
+    struct ggml_context * kvctx;
+
+    std::map<std::string, struct ggml_tensor*> tensors;
+};
+
+struct gpt_params {
+    int32_t seed      = -1; // RNG seed
+    int32_t n_threads =
std::min(4, (int32_t) std::thread::hardware_concurrency()); + uint32_t n_predict = 200; // new tokens to predict + uint32_t n_batch = 512; // batch size for prompt processing + + // sampling parameters + int32_t top_k = 40; + float top_p = 1.0f; + float temp = 0.8f; + int32_t repeat_last_n = 64; + float repeat_penalty = 1.02f; + + std::string model = ""; // model path + std::string prompt = ""; + + std::string token_test = ""; + bool interactive = false; + int32_t interactive_port = -1; + int32_t n_gpu_layers = 0; +}; + +void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { + fprintf(stderr, "usage: %s [options]\n", argv[0]); + fprintf(stderr, "\n"); + fprintf(stderr, "options:\n"); + fprintf(stderr, " -h, --help show this help message and exit\n"); + fprintf(stderr, " -s SEED, --seed SEED RNG seed (default: -1)\n"); + fprintf(stderr, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads); + fprintf(stderr, " -ngl N, --gpu-layers N number of layers to offload to GPU on supported models (default: %d)\n", params.n_gpu_layers); + fprintf(stderr, " -p PROMPT, --prompt PROMPT\n"); + fprintf(stderr, " prompt to start generation with (default: random)\n"); + fprintf(stderr, " -f FNAME, --file FNAME\n"); + fprintf(stderr, " load prompt from a file\n"); + fprintf(stderr, " -tt TOKEN_TEST, --token_test TOKEN_TEST\n"); + fprintf(stderr, " test tokenization\n"); + fprintf(stderr, " -n N, --n_predict N number of tokens to predict (default: %d)\n", params.n_predict); + fprintf(stderr, " --top_k N top-k sampling, 0 = n_vocab (default: %d)\n", params.top_k); + fprintf(stderr, " --top_p N top-p sampling (default: %.1f)\n", params.top_p); + fprintf(stderr, " --temp N temperature (default: %.1f)\n", params.temp); + fprintf(stderr, " --repeat-last-n N last n tokens to consider for penalize (default: %d, 0 = disabled)\n", params.repeat_last_n); + fprintf(stderr, " --repeat-penalty N penalize repeat sequence of tokens (default: %.2f, 1.0 = disabled)\n", (double)params.repeat_penalty); + fprintf(stderr, " -b N, --batch_size N batch size for prompt processing (default: %d)\n", params.n_batch); + fprintf(stderr, " -m FNAME, --model FNAME\n"); + fprintf(stderr, " model path (default: %s)\n", params.model.c_str()); + fprintf(stderr, "\n"); +} + +// Function to check if the next argument exists +std::string get_next_arg(int& i, int argc, char** argv, const std::string& flag, gpt_params& params) { + if (i + 1 < argc && argv[i + 1][0] != '-') { + return argv[++i]; + } else { + fprintf(stderr, "error: %s requires one argument.\n", flag.c_str()); + gpt_print_usage(argc, argv, params); + exit(0); + } +} + +bool gpt_params_parse(int argc, char ** argv, gpt_params & params) { + for (int i = 1; i < argc; i++) { + std::string arg = argv[i]; + + if (arg == "-s" || arg == "--seed") { + params.seed = std::stoi(get_next_arg(i, argc, argv, arg, params)); + } else if (arg == "-t" || arg == "--threads") { + params.n_threads = std::stoi(get_next_arg(i, argc, argv, arg, params)); + } else if (arg == "-ngl" || arg == "--gpu-layers" || arg == "--n-gpu-layers") { + params.n_gpu_layers = std::stoi(get_next_arg(i, argc, argv, arg, params)); + } else if (arg == "-p" || arg == "--prompt") { + params.prompt = get_next_arg(i, argc, argv, arg, params); + } else if (arg == "-n" || arg == "--n_predict") { + params.n_predict = std::stoi(get_next_arg(i, argc, argv, arg, params)); + } else if (arg == "--top_k") { + params.top_k = std::stoi(get_next_arg(i, argc, argv, arg, params)); 
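+        // example invocation (sketch; binary and model file names are placeholders):
+        //   ./falcon-main -m falcon-7b.gguf -p "Once upon a time" -n 128 --temp 0.7 --top_k 40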
+        } else if (arg == "--top_p") {
+            params.top_p = std::stof(get_next_arg(i, argc, argv, arg, params));
+        } else if (arg == "--temp") {
+            params.temp = std::stof(get_next_arg(i, argc, argv, arg, params));
+        } else if (arg == "--repeat-last-n") {
+            params.repeat_last_n = std::stoi(get_next_arg(i, argc, argv, arg, params));
+        } else if (arg == "--repeat-penalty") {
+            params.repeat_penalty = std::stof(get_next_arg(i, argc, argv, arg, params));
+        } else if (arg == "-b" || arg == "--batch_size") {
+            params.n_batch = std::stoi(get_next_arg(i, argc, argv, arg, params));
+        } else if (arg == "-m" || arg == "--model") {
+            params.model = get_next_arg(i, argc, argv, arg, params);
+        } else if (arg == "-i" || arg == "--interactive") {
+            params.interactive = true;
+        } else if (arg == "-ip" || arg == "--interactive-port") {
+            params.interactive = true;
+            params.interactive_port = std::stoi(get_next_arg(i, argc, argv, arg, params));
+        } else if (arg == "-h" || arg == "--help") {
+            gpt_print_usage(argc, argv, params);
+            exit(0);
+        } else if (arg == "-f" || arg == "--file") {
+            get_next_arg(i, argc, argv, arg, params);
+            std::ifstream file(argv[i]);
+            if (!file) {
+                fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
+                break;
+            }
+            std::copy(std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>(), back_inserter(params.prompt));
+            if (!params.prompt.empty() && params.prompt.back() == '\n') {
+                params.prompt.pop_back();
+            }
+        } else if (arg == "-tt" || arg == "--token_test") {
+            params.token_test = get_next_arg(i, argc, argv, arg, params);
+        }
+        else {
+            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
+            gpt_print_usage(argc, argv, params);
+            exit(0);
+        }
+    }
+
+    return true;
+}
+
+gpt2bpe_vocab::id sample_top_k_top_p_repeat(
+        const gpt2bpe_vocab & vocab,
+        const float * logits,
+        const int32_t * last_n_tokens_data,
+        size_t last_n_tokens_data_size,
+        int top_k,
+        double top_p,
+        double temp,
+        int repeat_last_n,
+        float repeat_penalty,
+        std::mt19937 & rng) {
+
+    int n_logits = vocab.id_to_token.size();
+
+    const auto * plogits = logits;
+
+    const auto last_n_tokens = std::vector<int32_t>(last_n_tokens_data, last_n_tokens_data + last_n_tokens_data_size);
+
+    if (temp <= 0) {
+        // select the token with the highest logit directly
+        float max_logit = plogits[0];
+        gpt2bpe_vocab::id max_id = 0;
+
+        for (int i = 1; i < n_logits; ++i) {
+            if (plogits[i] > max_logit) {
+                max_logit = plogits[i];
+                max_id = i;
+            }
+        }
+        return max_id;
+    }
+
+    std::vector<std::pair<double, gpt2bpe_vocab::id>> logits_id;
+    logits_id.reserve(n_logits);
+
+    {
+        const float scale = 1.0f/temp;
+        for (int i = 0; i < n_logits; ++i) {
+            // repetition penalty from ctrl paper (https://arxiv.org/abs/1909.05858)
+            // credit https://github.com/facebookresearch/llama/compare/main...shawwn:llama:main
+            if (repeat_last_n > 0 && std::find(last_n_tokens.end()-repeat_last_n, last_n_tokens.end(), i) != last_n_tokens.end()) {
+                // if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability
+                if (plogits[i] < 0.0f) {
+                    logits_id.push_back(std::make_pair(plogits[i]*scale*repeat_penalty, i));
+                } else {
+                    logits_id.push_back(std::make_pair(plogits[i]*scale/repeat_penalty, i));
+                }
+            } else {
+                logits_id.push_back(std::make_pair(plogits[i]*scale, i));
+            }
+        }
+    }
+
+    // clamp top_k to the vocab size so partial_sort stays in range
+    top_k = std::min(top_k, n_logits);
+
+    // find the top K tokens
+    std::partial_sort(
+            logits_id.begin(),
+            logits_id.begin() + top_k, logits_id.end(),
+            [](const std::pair<double, gpt2bpe_vocab::id> & a, const std::pair<double, gpt2bpe_vocab::id> & b) {
+        return a.first > b.first;
+    });
+
+    logits_id.resize(top_k);
+
+    double maxl = -INFINITY;
+    for (const auto & kv : logits_id) {
+        maxl = std::max(maxl, kv.first);
+    }
+
+    // compute probs for the top K tokens
+    std::vector<double> probs;
+    probs.reserve(logits_id.size());
+
+    double sum = 0.0;
+    for (const auto & kv : logits_id) {
+        double p = exp(kv.first - maxl);
+        probs.push_back(p);
+        sum += p;
+    }
+
+    // normalize the probs
+    for (auto & p : probs) {
+        p /= sum;
+    }
+
+    if (top_p < 1.0f) {
+        double cumsum = 0.0;
+        for (int i = 0; i < top_k; i++) {
+            cumsum += probs[i];
+            if (cumsum >= top_p) {
+                top_k = i + 1;
+                probs.resize(top_k);
+                logits_id.resize(top_k);
+                break;
+            }
+        }
+
+        cumsum = 1.0/cumsum;
+        for (int i = 0; i < (int) probs.size(); i++) {
+            probs[i] *= cumsum;
+        }
+    }
+
+//    printf("\n");
+//    for (int i = 0; i < (int) probs.size(); i++) {
+//    for (int i = 0; i < 10; i++) {
+//        printf("%d: '%s' %f\n", i, vocab.id_to_token.at(logits_id[i].second).c_str(), probs[i]);
+//    }
+
+    std::discrete_distribution<> dist(probs.begin(), probs.end());
+    int idx = dist(rng);
+
+    return logits_id[idx].second;
+
+}
+
+struct ggml_tensor * get_tensor_ex( struct ggml_context * ctx, std::string name){
+
+    struct ggml_tensor * cur = ggml_get_tensor(ctx, name.c_str());
+    if( cur == NULL ) {
+        fprintf(stdout, "%s: tensor '%s' not found!\n", __func__, name.c_str());
+    } else {
+//        fprintf(stdout, "%s: n_dims = %d, name = '%s'\n", __func__, cur->n_dims, cur->name);
+    }
+
+    return cur;
+}
+
+// load the model's weights from a file
+bool falcon_model_load(const std::string & fname, falcon_model & model, gpt2bpe_vocab & vocab) {
+    printf("%s: loading model from '%s'...\n", __func__, fname.c_str());
+
+    model.ctx = NULL;
+
+    struct gguf_init_params ggufparams = {
+        /*.no_alloc = */ false,
+        /*.ctx      = */ &model.ctx,
+    };
+
+    auto & ggufctx = model.ggufctx;
+
+    ggufctx = gguf_init_from_file(fname.c_str(), ggufparams);
+
+    if (!ggufctx) {
+        fprintf(stderr, "%s: gguf_init_from_file() failed\n", __func__);
+        return false;
+    }
+
+    fprintf(stdout, "%s: gguf version = %d\n", __func__, gguf_get_version(ggufctx));
+    fprintf(stdout, "%s: gguf alignment = %zu\n", __func__, gguf_get_alignment(ggufctx));
+    fprintf(stdout, "%s: gguf data offset = %zu\n", __func__, gguf_get_data_offset(ggufctx));
+
+    // print all kv
+    #if 0
+    {
+        const int n_kv = gguf_get_n_kv(ggufctx);
+
+        fprintf(stdout, "%s: n_kv: %d\n", __func__, n_kv);
+
+        for (int i = 0; i < n_kv; ++i) {
+            const char * key = gguf_get_key(ggufctx, i);
+
+            fprintf(stdout, "%s: kv[%d]: key = %s\n", __func__, i, key);
+        }
+    }
+    #endif
+
+    // print some standard metadata
+    {
+        int keyidx;
+
+        keyidx = gguf_find_key(ggufctx, "general.name");
+        if (keyidx != -1) { fprintf(stdout, "%s: model name = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); }
+        keyidx = gguf_find_key(ggufctx, "general.description");
+        if (keyidx != -1) { fprintf(stdout, "%s: model description = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); }
+        keyidx = gguf_find_key(ggufctx, "general.author");
+        if (keyidx != -1) { fprintf(stdout, "%s: model author = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); }
+        keyidx = gguf_find_key(ggufctx, "general.license");
+        if (keyidx != -1) { fprintf(stdout, "%s: model license = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); }
+        keyidx = gguf_find_key(ggufctx, "general.architecture");
+        if (keyidx != -1) { fprintf(stdout, "%s: model architecture = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); }
+        keyidx = gguf_find_key(ggufctx, "general.file_type");
+        if (keyidx != -1) { fprintf(stdout, "%s: model file type = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); }
+        keyidx = gguf_find_key(ggufctx, "falcon.tensor_data_layout");
+        if (keyidx != -1) { fprintf(stdout, "%s: model data layout = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); }
+        keyidx = gguf_find_key(ggufctx, "general.source.hugginface.repository");
+        if (keyidx != -1) { fprintf(stdout, "%s: model source HF repo = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); }
+    }
+
+    // check required metadata
+    {
+        int keyidx;
+
+        // check model architecture kv
+        keyidx = gguf_find_key(ggufctx, "general.architecture");
+        if (keyidx != -1) {
+            if ( strcmp(gguf_get_val_str(ggufctx, keyidx), "falcon") != 0) {
+                fprintf(stdout, "%s: model architecture not supported!\n", __func__);
+                return false;
+            }
+        } else {
+            fprintf(stdout, "%s: gguf model architecture not found!\n", __func__);
+            return false;
+        }
+
+        // check model tensor data layout kv
+        keyidx = gguf_find_key(ggufctx, "falcon.tensor_data_layout");
+        if (keyidx != -1) {
+            if ( strcmp(gguf_get_val_str(ggufctx, keyidx), "jploski") != 0) {
+                fprintf(stdout, "%s: model tensor data layout not supported!\n", __func__);
+                return false;
+            }
+        } else {
+            fprintf(stdout, "%s: gguf model tensor data layout not found!\n", __func__);
+            return false;
+        }
+
+    }
+
+    // load hparams
+    {
+        auto & hparams = model.hparams;
+
+        bool ok = true;
+        int keyidx;
+
+        if (ok) { keyidx = gguf_find_key(ggufctx, "falcon.context_length");
+                  if (keyidx != -1) { hparams.n_ctx = gguf_get_val_u32(ggufctx, keyidx); } else { ok = false; } }
+
+        if (ok) { keyidx = gguf_find_key(ggufctx, "falcon.embedding_length");
+                  if (keyidx != -1) { hparams.n_embd = gguf_get_val_u32(ggufctx, keyidx); } else { ok = false; } }
+
+        if (ok) { keyidx = gguf_find_key(ggufctx, "falcon.attention.head_count");
+                  if (keyidx != -1) { hparams.n_head = gguf_get_val_u32(ggufctx, keyidx); } else { ok = false; } }
+
+        if (ok) { keyidx = gguf_find_key(ggufctx, "falcon.feed_forward_length");
+                  if (keyidx != -1) { hparams.n_ff = gguf_get_val_u32(ggufctx, keyidx); } else { ok = false; } }
+
+        if (ok) { keyidx = gguf_find_key(ggufctx, "falcon.block_count");
+                  if (keyidx != -1) { hparams.n_block = gguf_get_val_u32(ggufctx, keyidx); } else { ok = false; } }
+
+        if (ok) { keyidx = gguf_find_key(ggufctx, "falcon.attention.layer_norm_epsilon");
+                  if (keyidx != -1) { hparams.norm_eps = gguf_get_val_f32(ggufctx, keyidx); } else { ok = false; } }
+
+        if (!ok) {
+            fprintf(stderr, "%s: required hparam missing!\n", __func__);
+            return false;
+        }
+
+        keyidx = gguf_find_key(ggufctx, "falcon.attention.head_count_kv");
+        if (keyidx != -1) { hparams.n_head_kv = gguf_get_val_u32(ggufctx, keyidx); }
+
+        printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx);
+        printf("%s: n_embd = %d\n", __func__, hparams.n_embd);
+        printf("%s: n_head = %d\n", __func__, hparams.n_head);
+        printf("%s: n_head_kv = %d\n", __func__, hparams.n_head_kv);
+        printf("%s: n_block = %d\n", __func__, hparams.n_block);
+        printf("%s: norm_eps = %g\n", __func__, hparams.norm_eps);
+
+    }
+
+    // load vocab
+    {
+        auto & hparams = model.hparams;
+
+        int keyidx = gguf_find_key(ggufctx, "tokenizer.ggml.model");
+
+        if (keyidx != -1) {
+            if ( strcmp(gguf_get_val_str(ggufctx, keyidx), "gpt2") != 0) {
+                fprintf(stdout, "%s: tokenizer model not supported!\n", __func__);
+                return false;
+            }
+        } else {
+            fprintf(stdout, "%s: tokenizer model not found!\n", __func__);
+            return false;
+        }
+
+        int tokens_keyidx = gguf_find_key(ggufctx, "tokenizer.ggml.tokens");
+
+        if (tokens_keyidx == -1) {
+            fprintf(stdout, "%s: gpt2 tokenizer vocab not found!\n", __func__);
+            return false;
+        }
+
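+        // each merges entry is a single space-separated pair as written by the
+        // GPT-2 BPE trainer -- e.g. an (illustrative) entry "Ġ t" splits into
+        // first = "Ġ" and second = "t"; the array index is the merge priority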
+        int merges_keyidx = gguf_find_key(ggufctx, "tokenizer.ggml.merges");
+
+        if (merges_keyidx == -1) {
+            fprintf(stdout, "%s: gpt2 tokenizer merges not found!\n", __func__);
+            return false;
+        }
+
+        hparams.n_vocab = gguf_get_arr_n(ggufctx, tokens_keyidx);
+        hparams.n_merges = gguf_get_arr_n(ggufctx, merges_keyidx);
+
+        fprintf(stdout, "%s: gpt2 tokenizer vocab = %zu\n", __func__, hparams.n_vocab);
+        fprintf(stdout, "%s: gpt2 tokenizer merges = %zu\n", __func__, hparams.n_merges);
+
+        for (size_t i = 0; i < hparams.n_vocab; i++) {
+            std::string word = gguf_get_arr_str(ggufctx, tokens_keyidx, i);
+
+//            printf("token %zu = '%s'\n", i, word.c_str() );
+
+            vocab.token_to_id[word] = i;
+            vocab.id_to_token[i] = word;
+
+            if( vocab.id_to_token[i] == "\n" ) {
+                vocab.linefeed_id = i;
+            }
+        }
+
+        std::vector<std::pair<std::string, std::string>> bpe_merges;
+
+        for (size_t i = 0; i < hparams.n_merges; i++) {
+
+            std::string word = gguf_get_arr_str(ggufctx, merges_keyidx, i);
+
+            // Split the merges
+            std::string first, second;
+            size_t pos = word.find(' ', 1); // Start the search from the second character
+            if (pos != std::string::npos) {
+                first = word.substr(0, pos);
+                second = word.substr(pos + 1);
+            }
+
+            bpe_merges.push_back(std::make_pair(first, second));
+        }
+
+        vocab.populate_bpe_ranks(bpe_merges);
+
+        keyidx = gguf_find_key(ggufctx, "tokenizer.ggml.bos_token_id");       if( keyidx != -1 ) { vocab.special_bos_id = (int32_t)gguf_get_val_u32(ggufctx, keyidx); }
+        keyidx = gguf_find_key(ggufctx, "tokenizer.ggml.eos_token_id");       if( keyidx != -1 ) { vocab.special_eos_id = (int32_t)gguf_get_val_u32(ggufctx, keyidx); }
+        keyidx = gguf_find_key(ggufctx, "tokenizer.ggml.unknown_token_id");   if( keyidx != -1 ) { vocab.special_unk_id = (int32_t)gguf_get_val_u32(ggufctx, keyidx); }
+        keyidx = gguf_find_key(ggufctx, "tokenizer.ggml.separator_token_id"); if( keyidx != -1 ) { vocab.special_sep_id = (int32_t)gguf_get_val_u32(ggufctx, keyidx); }
+        keyidx = gguf_find_key(ggufctx, "tokenizer.ggml.padding_token_id");   if( keyidx != -1 ) { vocab.special_pad_id = (int32_t)gguf_get_val_u32(ggufctx, keyidx); }
+
+        if( vocab.special_bos_id != -1 ) { fprintf(stdout, "%s: BOS token = %d '%s'\n", __func__, vocab.special_bos_id, vocab.id_to_token[vocab.special_bos_id].c_str() ); }
+        if( vocab.special_eos_id != -1 ) { fprintf(stdout, "%s: EOS token = %d '%s'\n", __func__, vocab.special_eos_id, vocab.id_to_token[vocab.special_eos_id].c_str() ); }
+        if( vocab.special_unk_id != -1 ) { fprintf(stdout, "%s: UNK token = %d '%s'\n", __func__, vocab.special_unk_id, vocab.id_to_token[vocab.special_unk_id].c_str() ); }
+        if( vocab.special_sep_id != -1 ) { fprintf(stdout, "%s: SEP token = %d '%s'\n", __func__, vocab.special_sep_id, vocab.id_to_token[vocab.special_sep_id].c_str() ); }
+        if( vocab.special_pad_id != -1 ) { fprintf(stdout, "%s: PAD token = %d '%s'\n", __func__, vocab.special_pad_id, vocab.id_to_token[vocab.special_pad_id].c_str() ); }
+        if( vocab.linefeed_id != -1 )    { fprintf(stdout, "%s: LF token = %d\n", __func__, vocab.linefeed_id ); }
+
+    }
+
+
+    auto & ctx = model.ctx;
+    size_t ctx_size = ggml_get_mem_size(ctx);
+
+    printf("%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0));
+
+    // print tensor info
+    #if 0
+    {
+        const int n_tensors = gguf_get_n_tensors(ggufctx);
+
+        fprintf(stdout, "%s: n_tensors: %d\n", __func__, n_tensors);
+
+        for (int i = 0; i < n_tensors; ++i) {
+            const char * name = gguf_get_tensor_name (ggufctx, i);
+            const size_t offset = gguf_get_tensor_offset(ggufctx, i);
+
+            fprintf(stdout, "%s: tensor[%d]: name = %s, offset = %zu\n",
__func__, i, name, offset); + } + } + #endif + + // prepare memory for the weights + { + + auto & hparams = model.hparams; + + const int n_block = hparams.n_block; + + model.blocks.resize(n_block); + + model.tok_embeddings = ggml_get_tensor(ctx, "token_embd.weight"); + + model.output_norm = ggml_get_tensor(ctx, "output_norm.weight"); + model.output_norm_b = ggml_get_tensor(ctx, "output_norm.bias"); + model.lm_head = ggml_get_tensor(ctx, "output.weight"); + + // map by name + model.tensors["token_embd.weight"] = model.tok_embeddings; + model.tensors["output_norm.weight"] = model.output_norm; + model.tensors["output_norm.bias"] = model.output_norm_b; + model.tensors["output.weight"] = model.lm_head; + + for (int i = 0; i < n_block; ++i) { + + auto& block = model.blocks[i]; + std::string blocknamestart = "blk." + std::to_string(i) + "."; + + block.input_layernorm = get_tensor_ex(ctx, blocknamestart + "attn_norm.weight" ); + block.input_layernorm_b = get_tensor_ex(ctx, blocknamestart + "attn_norm.bias" ); + + if ( hparams.n_head_kv == 8 ) { // Falcon-40B + block.attention_norm = get_tensor_ex(ctx, blocknamestart + "attn_norm_2.weight" ); + block.attention_norm_b = get_tensor_ex(ctx, blocknamestart + "attn_norm_2.bias" ); + } + + // query_key_value shape for config.multi_query == True: + block.query_key_value = get_tensor_ex(ctx, blocknamestart + "attn_qkv.weight" ); + block.wo = get_tensor_ex(ctx, blocknamestart + "attn_output.weight" ); + + block.ffn_up = get_tensor_ex(ctx, blocknamestart + "ffn_up.weight" ); + block.ffn_down = get_tensor_ex(ctx, blocknamestart + "ffn_down.weight" ); + + // map by name + if ( hparams.n_head_kv == 8 ) { // Falcon-40B + // Falcon-40B: + model.tensors[blocknamestart + "attn_norm.weight"] = block.input_layernorm; + model.tensors[blocknamestart + "attn_norm.bias"] = block.input_layernorm_b; + model.tensors[blocknamestart + "attn_norm_2.weight"] = block.attention_norm; + model.tensors[blocknamestart + "attn_norm_2.bias"] = block.attention_norm_b; + } else { + // Falcon-7B: + model.tensors[blocknamestart + "attn_norm.weight"] = block.input_layernorm; + model.tensors[blocknamestart + "attn_norm.bias"] = block.input_layernorm_b; + } + + model.tensors[blocknamestart + "attn_qkv.weight"] = block.query_key_value; + model.tensors[blocknamestart + "attn_output.weight"] = block.wo; + + model.tensors[blocknamestart + "ffn_up.weight"] = block.ffn_up; + model.tensors[blocknamestart + "ffn_down.weight"] = block.ffn_down; + } + } + + // key + value memory + { + const auto & kvctx = model.kvctx; + const auto & hparams = model.hparams; + + const int n_block = hparams.n_block; + const int n_ctx = hparams.n_ctx; + const int n_embd = hparams.n_embd; + + const int64_t n_mem = n_block*n_ctx; + const int64_t n_elements = n_embd*n_mem; + + // create the ggml context + { + struct ggml_init_params params = { + /*.mem_size =*/ size_t(n_elements*4+ggml_tensor_overhead()*2), + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ false, + }; + + model.kvctx = ggml_init(params); + if (!model.kvctx) { + fprintf(stderr, "%s: kv ggml_init() failed\n", __func__); + return false; + } + + } + + + model.memory_k = ggml_new_tensor_1d(kvctx, GGML_TYPE_F16, n_elements); + model.memory_v = ggml_new_tensor_1d(kvctx, GGML_TYPE_F16, n_elements); + + const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v); + + printf("%s: memory_size = %8.2f MB, n_mem = %" PRId64 "\n", __func__, memory_size/1024.0/1024.0, n_mem); + } + + return true; +} + + +// evaluate the transformer +// +// - model: the 
model
+// - n_threads: number of threads to use
+// - n_past:    the context size so far
+// - embd_inp:  the embeddings of the tokens in the context
+// - embd_w:    the predicted logits for the next token
+//
+bool falcon_eval(
+        const falcon_model & model,
+        const int n_threads,
+        const int n_past,
+        const std::vector<gpt2bpe_vocab::id> & embd_inp,
+              std::vector<float>             & embd_w,
+        size_t                               & mem_per_token) {
+
+    const int N = embd_inp.size();
+
+    const auto & hparams = model.hparams;
+
+    const int n_embd = hparams.n_embd;
+    const int n_block = hparams.n_block;
+    const int n_ctx = hparams.n_ctx;
+    const int n_head = hparams.n_head;
+    const int n_head_kv = hparams.n_head_kv;
+    const int n_vocab = hparams.n_vocab;
+    const size_t head_dim = n_embd / n_head;
+
+    static size_t buf_size = 256u*1024*1024;
+    static void * buf = malloc(buf_size);
+
+    // use 2 scratch buffers
+    // TODO: very hacky solution - reimplement in a more elegant way
+    static size_t scr0_size = 256u*1024*1024;
+    static void * scr0 = malloc(scr0_size);
+
+    static size_t scr1_size = 256u*1024*1024;
+    static void * scr1 = malloc(scr1_size);
+
+    if (mem_per_token > 0 && mem_per_token*N > buf_size) {
+        const size_t buf_size_new = 1.1*(mem_per_token*N); // add 10% to account for ggml object overhead
+        //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);
+
+        // reallocate
+        buf_size = buf_size_new;
+        buf = realloc(buf, buf_size);
+        if (buf == nullptr) {
+            fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size);
+            return false;
+        }
+    }
+
+    struct ggml_init_params params = {
+        /*.mem_size   =*/ buf_size,
+        /*.mem_buffer =*/ buf,
+        /*.no_alloc   =*/ false,
+    };
+
+    struct ggml_context * ctx0 = ggml_init(params);
+    struct ggml_cgraph gf = {};
+//    gf.n_threads = n_threads;
+
+    struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
+    memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd));
+
+    // wte
+    struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.tok_embeddings, embd);
+//    struct ggml_tensor * repeat_dummy = ggml_new_tensor_3d(ctx0, inpL->type, head_dim, N + n_past, n_head);
+
+    ggml_type wtype = GGML_TYPE_F32;
+    const int sizeof_wtype = ggml_type_sizef(wtype);
+
+    for (int il = 0; il < n_block; ++il) {
+        struct ggml_tensor * cur;
+        struct ggml_tensor * layernorm_output;
+
+        ggml_set_scratch(ctx0, { 0, scr0_size, scr0, });
+
+        // self-attention
+        {
+            layernorm_output = ggml_norm(ctx0, inpL);
+
+            layernorm_output = ggml_add(ctx0,
+                    ggml_mul(ctx0,
+                        ggml_repeat(ctx0, model.blocks[il].input_layernorm, layernorm_output),
+                        layernorm_output),
+                    ggml_repeat(ctx0, model.blocks[il].input_layernorm_b, layernorm_output));
+
+            if ( hparams.n_head_kv == 8 ) { // Falcon-40B
+                cur = ggml_norm(ctx0, inpL);
+
+                cur = ggml_add(ctx0,
+                        ggml_mul(ctx0,
+                            ggml_repeat(ctx0, model.blocks[il].attention_norm, cur),
+                            cur),
+                        ggml_repeat(ctx0, model.blocks[il].attention_norm_b, cur));
+            }
+            else { // Falcon 7B
+                cur = layernorm_output;
+            }
+
+            // compute QKV
+
+            cur = ggml_mul_mat(ctx0, model.blocks[il].query_key_value, cur);
+
+            // Note that the strides for Kcur, Vcur are set up so that the
+            // resulting views are misaligned with the tensor's storage
+            // (by applying the K/V offset we shift the tensor's original
+            // view to stick out behind the viewed QKV tensor's allocated
+            // memory, so to say). This is ok because no actual accesses
+            // happen to that out-of-range memory, but it can require some
+            // trickery when trying to accurately dump these views for
+            // debugging.
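+
+            // for illustration, the fused QKV rows for Falcon-7B
+            // (n_head = 71, n_head_kv = 1, head_dim = 64) look like this:
+            //   row width = head_dim * (n_head + 2*n_head_kv) = 64 * 73 floats
+            //   Q occupies floats [0, 64*71), K starts at float 64*71, V at 64*72
+            // the three views below slice exactly these ranges out of `cur`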
+ + struct ggml_tensor * Qcur = ggml_view_3d( + ctx0, cur, head_dim, n_head, N, + head_dim * sizeof_wtype, + head_dim * (n_head + 2 * n_head_kv) * sizeof_wtype, + 0); + + struct ggml_tensor * Kcur = ggml_view_3d( + ctx0, cur, head_dim, n_head_kv, N, + head_dim * sizeof_wtype, + head_dim * (n_head + 2 * n_head_kv) * sizeof_wtype, + head_dim * n_head * sizeof_wtype); + + struct ggml_tensor * Vcur = ggml_view_3d( + ctx0, cur, head_dim, n_head_kv, N, + head_dim * sizeof_wtype, + head_dim * (n_head + 2 * n_head_kv) * sizeof_wtype, + head_dim * (n_head + n_head_kv) * sizeof_wtype); + + // using mode = 2 for neox mode + Qcur = ggml_rope_inplace(ctx0, Qcur, n_past, head_dim, 2, 0); + Kcur = ggml_rope_inplace(ctx0, Kcur, n_past, head_dim, 2, 0); + + // store key and value to memory + { + struct ggml_tensor* k = ggml_view_1d( + ctx0, model.memory_k, N * n_head_kv * head_dim, + (ggml_element_size(model.memory_k) * n_head_kv * head_dim) * + (il * n_ctx + n_past)); + struct ggml_tensor* v = ggml_view_1d( + ctx0, model.memory_v, N * n_head_kv * head_dim, + (ggml_element_size(model.memory_v) * n_head_kv * head_dim) * + (il * n_ctx + n_past)); + + ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k)); + ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v)); + } + + struct ggml_tensor * K = ggml_permute( + ctx0, + ggml_reshape_3d( + ctx0, + ggml_view_1d(ctx0, model.memory_k, (n_past + N) * n_head_kv * head_dim, + il * n_ctx * + ggml_element_size(model.memory_k) * + n_head_kv * + head_dim), + head_dim, n_head_kv, n_past + N), + 0, 2, 1, 3); + + // K * Q + +// K = ggml_cont(ctx0, ggml_repeat2(ctx0, K, repeat_dummy)); + + struct ggml_tensor * Q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3); + struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); + + // KQ_scaled = KQ / sqrt(n_embd/n_head) + struct ggml_tensor * KQ_scaled = + ggml_scale_inplace(ctx0, + KQ, + ggml_new_f32(ctx0, 1.0f/sqrt(float(head_dim))) + ); + + // KQ_masked = mask_past(KQ_scaled) + struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past); + + // KQ = soft_max(KQ_masked) + struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked); + + // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous() + struct ggml_tensor* V = ggml_permute( + ctx0, + ggml_reshape_3d( + ctx0, + ggml_view_1d(ctx0, model.memory_v, (n_past + N) * n_head_kv * head_dim, + il * n_ctx * + ggml_element_size(model.memory_v) * + n_head_kv * + head_dim), + head_dim, n_head_kv, n_past + N), + 0, 2, 1, 3); + +// V = ggml_cont(ctx0, ggml_transpose(ctx0, ggml_repeat2(ctx0, V, repeat_dummy))); + V = ggml_cont(ctx0, ggml_transpose(ctx0, V)); + + // KQV = transpose(V) * KQ_soft_max + struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max); + + // KQV_merged = KQV.permute(0, 2, 1, 3) + struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3); + + // cur = KQV_merged.contiguous().view(n_embd, N) + cur = ggml_cpy(ctx0, + KQV_merged, + ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N)); + + // projection + { + cur = ggml_mul_mat(ctx0, + model.blocks[il].wo, + cur); + } + } + + ggml_set_scratch(ctx0, { 0, scr1_size, scr1, }); + + struct ggml_tensor* inpFF = layernorm_output; + struct ggml_tensor* attn_out = ggml_cpy( + ctx0, cur, ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N)); + + { + cur = ggml_mul_mat(ctx0, model.blocks[il].ffn_up, inpFF); + cur = ggml_gelu(ctx0, cur); + cur = ggml_mul_mat(ctx0, model.blocks[il].ffn_down, cur); + } + + cur = ggml_add(ctx0, cur, attn_out); + cur = ggml_add(ctx0, 
cur, inpL);
+        // input for next layer
+        inpL = cur;
+    }
+
+    ggml_set_scratch(ctx0, { 0, scr0_size, scr0, });
+
+    // norm
+    {
+        inpL = ggml_norm(ctx0, inpL);
+
+        // inpL = ln_f_g*inpL + ln_f_b
+        inpL = ggml_add(ctx0,
+                ggml_mul(ctx0,
+                    ggml_repeat(ctx0, model.output_norm, inpL),
+                    inpL),
+                ggml_repeat(ctx0, model.output_norm_b, inpL));
+    }
+
+    ggml_set_scratch(ctx0, { 0, 0, nullptr, });
+
+    // lm_head
+    {
+        inpL = ggml_mul_mat(ctx0, model.lm_head, inpL);
+
+        //inpL = ggml_add(ctx0,
+        //        ggml_repeat(ctx0, model.lmh_b, inpL),
+        //        inpL);
+    }
+
+    // logits -> probs
+    //inpL = ggml_soft_max_inplace(ctx0, inpL);
+
+    // run the computation
+    ggml_build_forward_expand(&gf, inpL);
+//    ggml_graph_compute       (ctx0, &gf);
+    ggml_graph_compute_with_ctx(ctx0, &gf, n_threads);
+
+    //if (n_past%100 == 0) {
+    //    ggml_graph_print   (&gf);
+    //    ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot");
+    //}
+
+    // return result for just the last token
+    embd_w.resize(n_vocab);
+    memcpy(embd_w.data(), (float *)ggml_get_data(inpL) + (n_vocab * (N - 1)), sizeof(float) * n_vocab);
+
+    if (mem_per_token == 0) {
+        mem_per_token = ggml_used_mem(ctx0)/N;
+    }
+    //printf("used_mem = %zu\n", ggml_used_mem(ctx0));
+
+    ggml_free(ctx0);
+
+    return true;
+}
+
+int main(int argc, char ** argv) {
+    ggml_time_init();
+
+    const int64_t t_main_start_us = ggml_time_us();
+
+    gpt_params params;
+
+    if (gpt_params_parse(argc, argv, params) == false) {
+        return 1;
+    }
+
+    int64_t t_load_us = 0;
+
+    gpt2bpe_vocab vocab;
+    falcon_model model;
+
+    // load the model
+    {
+        const int64_t t_start_us = ggml_time_us();
+
+        if (!falcon_model_load(params.model, model, vocab)) {
+            fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str());
+            return 1;
+        }
+
+        t_load_us = ggml_time_us() - t_start_us;
+
+    }
+
+    if (params.seed < 0) {
+        params.seed = time(NULL);
+    }
+
+    if (params.top_k == 0) {
+        params.top_k = model.hparams.n_vocab;
+    }
+
+    printf("%s: seed = %d\n", __func__, params.seed);
+    printf("%s: temp = %.3f\n", __func__, params.temp);
+    printf("%s: top_k = %d\n", __func__, params.top_k);
+    printf("%s: top_p = %.3f\n", __func__, params.top_p);
+    printf("%s: repeat_last_n = %d\n", __func__, params.repeat_last_n);
+    printf("%s: repeat_penalty = %.3f\n", __func__, params.repeat_penalty);
+
+    std::mt19937 rng(params.seed);
+
+    if (params.prompt.empty()) {
+        params.prompt = "Once upon";
+    }
+
+    std::vector<int32_t> last_n_tokens(model.hparams.n_ctx);
+    std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);
+
+    int n_past = 0;
+
+    int64_t t_sample_us = 0;
+    int64_t t_predict_us = 0;
+
+    std::vector<float> logits;
+
+    // tokenize the prompt
+    std::vector<gpt2bpe_vocab::id> embd_inp = gpt2bpe_tokenize(vocab, params.prompt, false, false);
+
+    params.n_predict = std::min(params.n_predict, model.hparams.n_ctx - (int) embd_inp.size());
+
+    printf("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
+//    for (size_t i = 0; i < embd_inp.size(); i++) {
+//        printf("%s: token[%zu] = %6d, %s\n", __func__, i, embd_inp[i], vocab.id_to_token[embd_inp[i]].c_str());
+//    }
+
+    if( model.hparams.n_ctx < params.n_predict+embd_inp.size() ) {
+        params.n_predict = model.hparams.n_ctx-embd_inp.size();
+    }
+
+    printf("%s: n_predict = %d\n", __func__, params.n_predict);
+    printf("\n");
+
+    std::vector<gpt2bpe_vocab::id> embd;
+
+    // determine the required inference memory per token:
+    size_t mem_per_token = 0;
+    falcon_eval(model, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token);
+
+    for (size_t i = embd.size(); i < embd_inp.size() + params.n_predict; i++) {
+        // predict
+        if (embd.size() > 0) {
+            const int64_t t_start_us = ggml_time_us();
+
+            if (!falcon_eval(model, params.n_threads, n_past, embd, logits, mem_per_token)) {
+                printf("Failed to predict\n");
+                return 1;
+            }
+
+            t_predict_us += ggml_time_us() - t_start_us;
+        }
+
+        n_past += embd.size();
+        embd.clear();
+
+        if (i >= embd_inp.size()) {
+            // sample next token
+            const int top_k = params.top_k;
+            const float top_p = params.top_p;
+            const float temp = params.temp;
+            const int repeat_last_n = params.repeat_last_n;
+            const float repeat_penalty = params.repeat_penalty;
+
+            const int n_vocab = model.hparams.n_vocab;
+
+            gpt2bpe_vocab::id id = 0;
+
+            {
+                const int64_t t_start_sample_us = ggml_time_us();
+
+                id = sample_top_k_top_p_repeat(vocab, logits.data() + (logits.size() - n_vocab), last_n_tokens.data(), last_n_tokens.size(), top_k, top_p, temp, repeat_last_n, repeat_penalty, rng);
+
+                last_n_tokens.erase(last_n_tokens.begin());
+                last_n_tokens.push_back(id);
+
+                t_sample_us += ggml_time_us() - t_start_sample_us;
+            }
+
+            // add it to the context
+            embd.push_back(id);
+        } else {
+            // if here, it means we are still processing the input prompt
+            for (size_t k = i; k < embd_inp.size(); k++) {
+                embd.push_back(embd_inp[k]);
+                if (embd.size() > params.n_batch) {
+                    break;
+                }
+            }
+            i += embd.size() - 1;
+        }
+
+        // display text
+        for (auto id : embd) {
+            printf("%s", vocab.id_to_token[id].c_str() );
+        }
+        fflush(stdout);
+
+        // end of text token
+        if (vocab.special_eos_id != -1 && embd.back() == vocab.special_eos_id) {
+            break;
+        }
+    }
+
+    // report timing
+    {
+        const int64_t t_main_end_us = ggml_time_us();
+
+        printf("\n\n");
+        printf("%s: mem per token = %8zu bytes\n", __func__, mem_per_token);
+        printf("%s: load time = %8.2f ms\n", __func__, t_load_us/1000.0f);
+        printf("%s: sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f);
+        printf("%s: predict time = %8.2f ms / %.2f ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past);
+        printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f);
+    }
+
+    ggml_free(model.ctx);
+
+    return 0;
+}
diff --git a/examples/gptneox-wip/gptneox-main.cpp b/examples/gptneox-wip/gptneox-main.cpp
new file mode 100644
index 000000000..04af50245
--- /dev/null
+++ b/examples/gptneox-wip/gptneox-main.cpp
@@ -0,0 +1,1082 @@
+#include "ggml.h"
+#include "cmpnct_gpt2bpe.hpp"
+
+#include <algorithm>
+#include <cmath>
+#include <cstdio>
+#include <cstring>
+#include <cinttypes>
+#include <ctime>
+#include <fstream>
+#include <map>
+#include <random>
+#include <string>
+#include <thread>
+#include <vector>
+
+#if defined(_MSC_VER)
+#pragma warning(disable: 4244 4267) // possible loss of data
+#endif
+
+// default hparams
+struct gpt_neox_hparams {
+    size_t n_merges = 0;
+    size_t n_vocab = 0;
+    uint32_t n_ctx = 0;
+    uint32_t n_embd = 0;
+    uint32_t n_head = 0;
+    uint32_t n_block = 0;
+    uint32_t n_rot = 0; // rotary_pct * (n_embd / n_head)
+    bool par_res = true;
+    float norm_eps = 1e-5;
+};
+
+struct gpt_neox_block {
+    // pre normalization
+    struct ggml_tensor * ln_1_g;
+    struct ggml_tensor * ln_1_b;
+
+    // attention
+    struct ggml_tensor * c_attn_attn_w;
+    struct ggml_tensor * c_attn_attn_b;
+
+    struct ggml_tensor * c_attn_proj_w;
+    struct ggml_tensor * c_attn_proj_b;
+
+    // post normalization
+    struct ggml_tensor * ln_2_g;
+    struct ggml_tensor * ln_2_b;
+
+    // ff
+    struct ggml_tensor * c_mlp_fc_w;
+    struct ggml_tensor * c_mlp_fc_b;
+
+    struct ggml_tensor * c_mlp_proj_w;
+    struct ggml_tensor * c_mlp_proj_b;
+};
+
+struct gpt_neox_model {
+    gpt_neox_hparams hparams;
+
+    // normalization
+    struct ggml_tensor * ln_f_g;
+    struct ggml_tensor * ln_f_b;
+
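+    // note: tensors are resolved by their GGUF names in gpt_neox_model_load()
+    // below (e.g. ln_f_g/ln_f_b come from "output_norm.weight"/"output_norm.bias")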
+    struct ggml_tensor * wte; // token embedding
+
+    struct ggml_tensor * lmh_g; // language model head
+
+    std::vector<gpt_neox_block> blocks;
+
+    // key + value memory
+    struct ggml_tensor * memory_k;
+    struct ggml_tensor * memory_v;
+
+    //
+    struct gguf_context * ggufctx;
+    struct ggml_context * ctx;
+    struct ggml_context * kvctx;
+
+    std::map<std::string, struct ggml_tensor *> tensors;
+};
+
+struct gpt_params {
+    int32_t seed = -1; // RNG seed
+    int32_t n_threads = std::min(4, (int32_t) std::thread::hardware_concurrency());
+    uint32_t n_predict = 200; // new tokens to predict
+    uint32_t n_batch = 512; // batch size for prompt processing
+
+    // sampling parameters
+    int32_t top_k = 40;
+    float top_p = 1.0f;
+    float temp = 0.8f;
+    int32_t repeat_last_n = 64;
+    float repeat_penalty = 1.02f;
+
+    std::string model = ""; // model path
+    std::string prompt = "";
+
+    std::string token_test = "";
+    bool interactive = false;
+    int32_t interactive_port = -1;
+    int32_t n_gpu_layers = 0;
+};
+
+void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
+    fprintf(stderr, "usage: %s [options]\n", argv[0]);
+    fprintf(stderr, "\n");
+    fprintf(stderr, "options:\n");
+    fprintf(stderr, "  -h, --help            show this help message and exit\n");
+    fprintf(stderr, "  -s SEED, --seed SEED  RNG seed (default: -1)\n");
+    fprintf(stderr, "  -t N, --threads N     number of threads to use during computation (default: %d)\n", params.n_threads);
+    fprintf(stderr, "  -ngl N, --gpu-layers N  number of layers to offload to GPU on supported models (default: %d)\n", params.n_gpu_layers);
+    fprintf(stderr, "  -p PROMPT, --prompt PROMPT\n");
+    fprintf(stderr, "                        prompt to start generation with (default: \"Once upon\")\n");
+    fprintf(stderr, "  -f FNAME, --file FNAME\n");
+    fprintf(stderr, "                        load prompt from a file\n");
+    fprintf(stderr, "  -tt TOKEN_TEST, --token_test TOKEN_TEST\n");
+    fprintf(stderr, "                        test tokenization\n");
+    fprintf(stderr, "  -n N, --n_predict N   number of tokens to predict (default: %d)\n", params.n_predict);
+    fprintf(stderr, "  --top_k N             top-k sampling, 0 = n_vocab (default: %d)\n", params.top_k);
+    fprintf(stderr, "  --top_p N             top-p sampling (default: %.1f)\n", params.top_p);
+    fprintf(stderr, "  --temp N              temperature (default: %.1f)\n", params.temp);
+    fprintf(stderr, "  --repeat-last-n N     last n tokens to consider for the repeat penalty (default: %d, 0 = disabled)\n", params.repeat_last_n);
+    fprintf(stderr, "  --repeat-penalty N    penalize repeated token sequences (default: %.2f, 1.0 = disabled)\n", (double)params.repeat_penalty);
+    fprintf(stderr, "  -b N, --batch_size N  batch size for prompt processing (default: %d)\n", params.n_batch);
+    fprintf(stderr, "  -m FNAME, --model FNAME\n");
+    fprintf(stderr, "                        model path (default: %s)\n", params.model.c_str());
+    fprintf(stderr, "\n");
+}
+
+// fetch the next CLI argument if it exists; otherwise report the missing value and exit
+std::string get_next_arg(int& i, int argc, char** argv, const std::string& flag, gpt_params& params) {
+    if (i + 1 < argc && argv[i + 1][0] != '-') {
+        return argv[++i];
+    } else {
+        fprintf(stderr, "error: %s requires one argument.\n", flag.c_str());
+        gpt_print_usage(argc, argv, params);
+        exit(0);
+    }
+}
+
+bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
+    for (int i = 1; i < argc; i++) {
+        std::string arg = argv[i];
+
+        if (arg == "-s" || arg == "--seed") {
+            params.seed = std::stoi(get_next_arg(i, argc, argv, arg, params));
+        } else if (arg == "-t" || arg == "--threads") {
+            params.n_threads = std::stoi(get_next_arg(i, argc, argv, arg, params));
+        } else if (arg == "-ngl" || arg == "--gpu-layers" || arg == "--n-gpu-layers") {
"--n-gpu-layers") { + params.n_gpu_layers = std::stoi(get_next_arg(i, argc, argv, arg, params)); + } else if (arg == "-p" || arg == "--prompt") { + params.prompt = get_next_arg(i, argc, argv, arg, params); + } else if (arg == "-n" || arg == "--n_predict") { + params.n_predict = std::stoi(get_next_arg(i, argc, argv, arg, params)); + } else if (arg == "--top_k") { + params.top_k = std::stoi(get_next_arg(i, argc, argv, arg, params)); + } else if (arg == "--top_p") { + params.top_p = std::stof(get_next_arg(i, argc, argv, arg, params)); + } else if (arg == "--temp") { + params.temp = std::stof(get_next_arg(i, argc, argv, arg, params)); + } else if (arg == "--repeat-last-n") { + params.repeat_last_n = std::stoi(get_next_arg(i, argc, argv, arg, params)); + } else if (arg == "--repeat-penalty") { + params.repeat_penalty = std::stof(get_next_arg(i, argc, argv, arg, params)); + } else if (arg == "-b" || arg == "--batch_size") { + params.n_batch= std::stoi(get_next_arg(i, argc, argv, arg, params)); + } else if (arg == "-m" || arg == "--model") { + params.model = get_next_arg(i, argc, argv, arg, params); + } else if (arg == "-i" || arg == "--interactive") { + params.interactive = true; + } else if (arg == "-ip" || arg == "--interactive-port") { + params.interactive = true; + params.interactive_port = std::stoi(get_next_arg(i, argc, argv, arg, params)); + } else if (arg == "-h" || arg == "--help") { + gpt_print_usage(argc, argv, params); + exit(0); + } else if (arg == "-f" || arg == "--file") { + get_next_arg(i, argc, argv, arg, params); + std::ifstream file(argv[i]); + if (!file) { + fprintf(stderr, "error: failed to open file '%s'\n", argv[i]); + break; + } + std::copy(std::istreambuf_iterator(file), std::istreambuf_iterator(), back_inserter(params.prompt)); + if (params.prompt.back() == '\n') { + params.prompt.pop_back(); + } + } else if (arg == "-tt" || arg == "--token_test") { + params.token_test = get_next_arg(i, argc, argv, arg, params); + } + else { + fprintf(stderr, "error: unknown argument: %s\n", arg.c_str()); + gpt_print_usage(argc, argv, params); + exit(0); + } + } + + return true; +} + +gpt2bpe_vocab::id sample_top_k_top_p_repeat( + const gpt2bpe_vocab & vocab, + const float * logits, + const int32_t * last_n_tokens_data, + size_t last_n_tokens_data_size, + int top_k, + double top_p, + double temp, + int repeat_last_n, + float repeat_penalty, + std::mt19937 & rng) { + + int n_logits = vocab.id_to_token.size(); + + const auto * plogits = logits; + + const auto last_n_tokens = std::vector(last_n_tokens_data, last_n_tokens_data + last_n_tokens_data_size); + + if (temp <= 0) { + // select the token with the highest logit directly + float max_logit = plogits[0]; + gpt2bpe_vocab::id max_id = 0; + + for (int i = 1; i < n_logits; ++i) { + if (plogits[i] > max_logit) { + max_logit = plogits[i]; + max_id = i; + } + } + return max_id; + } + + + std::vector> logits_id; + logits_id.reserve(n_logits); + + { + const float scale = 1.0f/temp; + for (int i = 0; i < n_logits; ++i) { + // repetition penalty from ctrl paper (https://arxiv.org/abs/1909.05858) + // credit https://github.com/facebookresearch/llama/compare/main...shawwn:llama:main + if (repeat_last_n > 0 && std::find(last_n_tokens.end()-repeat_last_n, last_n_tokens.end(), i) != last_n_tokens.end()) { + // if score < 0 then repetition penalty has to multiplied to reduce the previous token probability + if (plogits[i] < 0.0f) { + logits_id.push_back(std::make_pair(plogits[i]*scale*repeat_penalty, i)); + } else { + 
+                    logits_id.push_back(std::make_pair(plogits[i]*scale/repeat_penalty, i));
+                }
+            } else {
+                logits_id.push_back(std::make_pair(plogits[i]*scale, i));
+            }
+        }
+    }
+
+    // clamp top_k to the vocab size so partial_sort stays in range
+    top_k = std::min(top_k, n_logits);
+
+    // find the top K tokens
+    std::partial_sort(
+            logits_id.begin(),
+            logits_id.begin() + top_k, logits_id.end(),
+            [](const std::pair<double, gpt2bpe_vocab::id> & a, const std::pair<double, gpt2bpe_vocab::id> & b) {
+        return a.first > b.first;
+    });
+
+    logits_id.resize(top_k);
+
+    double maxl = -INFINITY;
+    for (const auto & kv : logits_id) {
+        maxl = std::max(maxl, kv.first);
+    }
+
+    // compute probs for the top K tokens
+    std::vector<double> probs;
+    probs.reserve(logits_id.size());
+
+    double sum = 0.0;
+    for (const auto & kv : logits_id) {
+        double p = exp(kv.first - maxl);
+        probs.push_back(p);
+        sum += p;
+    }
+
+    // normalize the probs
+    for (auto & p : probs) {
+        p /= sum;
+    }
+
+    if (top_p < 1.0f) {
+        double cumsum = 0.0;
+        for (int i = 0; i < top_k; i++) {
+            cumsum += probs[i];
+            if (cumsum >= top_p) {
+                top_k = i + 1;
+                probs.resize(top_k);
+                logits_id.resize(top_k);
+                break;
+            }
+        }
+
+        cumsum = 1.0/cumsum;
+        for (int i = 0; i < (int) probs.size(); i++) {
+            probs[i] *= cumsum;
+        }
+    }
+
+//    printf("\n");
+//    for (int i = 0; i < (int) probs.size(); i++) {
+//    for (int i = 0; i < 10; i++) {
+//        printf("%d: '%s' %f\n", i, vocab.id_to_token.at(logits_id[i].second).c_str(), probs[i]);
+//    }
+
+    std::discrete_distribution<> dist(probs.begin(), probs.end());
+    int idx = dist(rng);
+
+    return logits_id[idx].second;
+
+}
+
+struct ggml_tensor * get_tensor_ex( struct ggml_context * ctx, std::string name){
+
+    struct ggml_tensor * cur = ggml_get_tensor(ctx, name.c_str());
+    if( cur == NULL ) {
+        fprintf(stdout, "%s: tensor '%s' not found!\n", __func__, name.c_str());
+    } else {
+//        fprintf(stdout, "%s: n_dims = %d, name = '%s'\n", __func__, cur->n_dims, cur->name);
+    }
+
+    return cur;
+}
+
+// load the model's weights from a file
+bool gpt_neox_model_load(const std::string & fname, gpt_neox_model & model, gpt2bpe_vocab & vocab) {
+    printf("%s: loading model from '%s'...\n", __func__, fname.c_str());
+
+    model.ctx = NULL;
+
+    struct gguf_init_params ggufparams = {
+        /*.no_alloc = */ false,
+        /*.ctx      = */ &model.ctx,
+    };
+
+    auto & ggufctx = model.ggufctx;
+
+    ggufctx = gguf_init_from_file(fname.c_str(), ggufparams);
+
+    if (!ggufctx) {
+        fprintf(stderr, "%s: gguf_init_from_file() failed\n", __func__);
+        return false;
+    }
+
+    fprintf(stdout, "%s: gguf version = %d\n", __func__, gguf_get_version(ggufctx));
+    fprintf(stdout, "%s: gguf alignment = %zu\n", __func__, gguf_get_alignment(ggufctx));
+    fprintf(stdout, "%s: gguf data offset = %zu\n", __func__, gguf_get_data_offset(ggufctx));
+
+    // print all kv
+    #if 0
+    {
+        const int n_kv = gguf_get_n_kv(ggufctx);
+
+        fprintf(stdout, "%s: n_kv: %d\n", __func__, n_kv);
+
+        for (int i = 0; i < n_kv; ++i) {
+            const char * key = gguf_get_key(ggufctx, i);
+
+            fprintf(stdout, "%s: kv[%d]: key = %s\n", __func__, i, key);
+        }
+    }
+    #endif
+
+    // print some standard metadata
+    {
+        int keyidx;
+
+        keyidx = gguf_find_key(ggufctx, "general.name");
+        if (keyidx != -1) { fprintf(stdout, "%s: model name = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); }
+        keyidx = gguf_find_key(ggufctx, "general.description");
+        if (keyidx != -1) { fprintf(stdout, "%s: model description = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); }
+        keyidx = gguf_find_key(ggufctx, "general.author");
+        if (keyidx != -1) { fprintf(stdout, "%s: model author = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); }
+        keyidx = gguf_find_key(ggufctx, "general.license");
+        if (keyidx !=
-1) { fprintf(stdout, "%s: model license = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); } + keyidx = gguf_find_key(ggufctx, "general.architecture"); + if (keyidx != -1) { fprintf(stdout, "%s: model architecture = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); } + keyidx = gguf_find_key(ggufctx, "general.file_type"); + if (keyidx != -1) { fprintf(stdout, "%s: model file type = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); } + keyidx = gguf_find_key(ggufctx, "gptneox.tensor_data_layout"); + if (keyidx != -1) { fprintf(stdout, "%s: model data layout = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); } + keyidx = gguf_find_key(ggufctx, "general.source.hugginface.repository"); + if (keyidx != -1) { fprintf(stdout, "%s: model source HF repo = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); } + } + + // check required metadata + { + int keyidx; + + // check model architecture kv + keyidx = gguf_find_key(ggufctx, "general.architecture"); + if (keyidx != -1) { + if ( strcmp(gguf_get_val_str(ggufctx, keyidx), "gptneox") != 0) { + fprintf(stdout, "%s: model architecture not supported!\n", __func__); + return false; + } + } else { + fprintf(stdout, "%s: gguf model architecture not found!\n", __func__); + return false; + } + + } + + // load hparams + { + auto & hparams = model.hparams; + + bool ok = true; + int keyidx; + + if (ok) { keyidx = gguf_find_key(ggufctx, "gptneox.context_length"); + if (keyidx != -1) { hparams.n_ctx = gguf_get_val_u32(ggufctx, keyidx); } else { ok = false; } } + + if (ok) { keyidx = gguf_find_key(ggufctx, "gptneox.embedding_length"); + if (keyidx != -1) { hparams.n_embd = gguf_get_val_u32(ggufctx, keyidx); } else { ok = false; } } + + if (ok) { keyidx = gguf_find_key(ggufctx, "gptneox.attention.head_count"); + if (keyidx != -1) { hparams.n_head = gguf_get_val_u32(ggufctx, keyidx); } else { ok = false; } } + + if (ok) { keyidx = gguf_find_key(ggufctx, "gptneox.block_count"); + if (keyidx != -1) { hparams.n_block = gguf_get_val_u32(ggufctx, keyidx); } else { ok = false; } } + + if (ok) { keyidx = gguf_find_key(ggufctx, "gptneox.rope.dimension_count"); + if (keyidx != -1) { hparams.n_rot = gguf_get_val_u32(ggufctx, keyidx); } else { ok = false; } } + + if (ok) { keyidx = gguf_find_key(ggufctx, "gptneox.use_parallel_residual"); + if (keyidx != -1) { hparams.par_res = gguf_get_val_bool(ggufctx, keyidx); } else { ok = false; } } + + if (ok) { keyidx = gguf_find_key(ggufctx, "gptneox.attention.layer_norm_epsilon"); + if (keyidx != -1) { hparams.norm_eps= gguf_get_val_f32(ggufctx, keyidx); } else { ok = false; } } + + if (!ok) { + fprintf(stderr, "%s: required hparam missing!\n", __func__); + return false; + } + + printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx); + printf("%s: n_embd = %d\n", __func__, hparams.n_embd); + printf("%s: n_head = %d\n", __func__, hparams.n_head); + printf("%s: n_block = %d\n", __func__, hparams.n_block); + printf("%s: n_rot = %d\n", __func__, hparams.n_rot); + printf("%s: par_res = %d\n", __func__, hparams.par_res); + printf("%s: norm_eps = %g\n", __func__, hparams.norm_eps); + + } + + // load vocab + { + auto & hparams = model.hparams; + + int keyidx = gguf_find_key(ggufctx, "tokenizer.ggml.model"); + + if (keyidx != -1) { + if ( strcmp(gguf_get_val_str(ggufctx, keyidx), "gpt2") != 0) { + fprintf(stdout, "%s: tokenizer model not supported!\n", __func__); + return false; + } + } else { + fprintf(stdout, "%s: tokenizer model not found!\n", __func__); + return false; + } + + + int tokens_keyidx = gguf_find_key(ggufctx, 
"tokenizer.ggml.tokens"); + + if (tokens_keyidx == -1) { + fprintf(stdout, "%s: gpt2 tokenizer vocab not found!\n", __func__); + return false; + } + + int merges_keyidx = gguf_find_key(ggufctx, "tokenizer.ggml.merges"); + + if (merges_keyidx == -1) { + fprintf(stdout, "%s: gpt2 tokenizer merges not found!\n", __func__); + return false; + } + + hparams.n_vocab = gguf_get_arr_n(ggufctx,tokens_keyidx); + hparams.n_merges = gguf_get_arr_n(ggufctx,merges_keyidx); + + fprintf(stdout, "%s: gpt2 tokenizer vocab = %zu\n", __func__, hparams.n_vocab); + fprintf(stdout, "%s: gpt2 tokenizer merges = %zu\n", __func__, hparams.n_merges); + + for (size_t i = 0; i < hparams.n_vocab; i++) { + std::string word = gguf_get_arr_str(ggufctx, tokens_keyidx, i); + +// printf("token %d = '%s'\n",i,word.c_str() ); + + vocab.token_to_id[word] = i; + vocab.id_to_token[i] = word; + + if( vocab.id_to_token[i] == "\n" ) { + vocab.linefeed_id = i; + } + } + + std::vector> bpe_merges; + + for (size_t i = 0; i < hparams.n_merges; i++) { + + std::string word = gguf_get_arr_str(ggufctx, merges_keyidx, i); + + // Split the merges + std::string first, second; + size_t pos = word.find(' ', 1); // Start the search from the second character + if (pos != std::string::npos) { + first = word.substr(0, pos); + second = word.substr(pos + 1); + } + + bpe_merges.push_back(std::make_pair(first, second)); + } + + vocab.populate_bpe_ranks(bpe_merges); + + + keyidx = gguf_find_key(ggufctx, "tokenizer.ggml.bos_token_id"); if( keyidx != -1 ) { vocab.special_bos_id = (int32_t)gguf_get_val_u32(ggufctx, keyidx); } + keyidx = gguf_find_key(ggufctx, "tokenizer.ggml.eos_token_id"); if( keyidx != -1 ) { vocab.special_eos_id = (int32_t)gguf_get_val_u32(ggufctx, keyidx); } + keyidx = gguf_find_key(ggufctx, "tokenizer.ggml.unknown_token_id"); if( keyidx != -1 ) { vocab.special_unk_id = (int32_t)gguf_get_val_u32(ggufctx, keyidx); } + keyidx = gguf_find_key(ggufctx, "tokenizer.ggml.separator_token_id"); if( keyidx != -1 ) { vocab.special_sep_id = (int32_t)gguf_get_val_u32(ggufctx, keyidx); } + keyidx = gguf_find_key(ggufctx, "tokenizer.ggml.padding_token_id"); if( keyidx != -1 ) { vocab.special_pad_id = (int32_t)gguf_get_val_u32(ggufctx, keyidx); } + + if( vocab.special_bos_id != -1 ) { fprintf(stdout, "%s: BOS token = %d '%s'\n", __func__, vocab.special_bos_id, vocab.id_to_token[vocab.special_bos_id].c_str() ); } + if( vocab.special_eos_id != -1 ) { fprintf(stdout, "%s: EOS token = %d '%s'\n", __func__, vocab.special_eos_id, vocab.id_to_token[vocab.special_eos_id].c_str() ); } + if( vocab.special_unk_id != -1 ) { fprintf(stdout, "%s: UNK token = %d '%s'\n", __func__, vocab.special_unk_id, vocab.id_to_token[vocab.special_unk_id].c_str() ); } + if( vocab.special_sep_id != -1 ) { fprintf(stdout, "%s: SEP token = %d '%s'\n", __func__, vocab.special_sep_id, vocab.id_to_token[vocab.special_sep_id].c_str() ); } + if( vocab.special_pad_id != -1 ) { fprintf(stdout, "%s: PAD token = %d '%s'\n", __func__, vocab.special_pad_id, vocab.id_to_token[vocab.special_pad_id].c_str() ); } + if( vocab.linefeed_id != -1 ) { fprintf(stdout, "%s: LF token = %d\n", __func__, vocab.linefeed_id ); } + } + + + auto & ctx = model.ctx; + size_t ctx_size = ggml_get_mem_size(ctx); + + printf("%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0)); + + // print tensor info + #if 0 + { + const int n_tensors = gguf_get_n_tensors(ggufctx); + + fprintf(stdout, "%s: n_tensors: %d\n", __func__, n_tensors); + + for (int i = 0; i < n_tensors; ++i) { + const char * name = 
gguf_get_tensor_name (ggufctx, i); + const size_t offset = gguf_get_tensor_offset(ggufctx, i); + + fprintf(stdout, "%s: tensor[%d]: name = %s, offset = %zu\n", __func__, i, name, offset); + } + } + #endif + + // prepare memory for the weights + { + const int n_block = model.hparams.n_block; + + model.blocks.resize(n_block); + + model.wte = ggml_get_tensor(ctx, "token_embd.weight"); + model.ln_f_g = ggml_get_tensor(ctx, "output_norm.weight"); + model.ln_f_b = ggml_get_tensor(ctx, "output_norm.bias"); + model.lmh_g = ggml_get_tensor(ctx, "output.weight"); + + // map by name + model.tensors["token_embd.weight"] = model.wte; + model.tensors["output_norm.weight"] = model.ln_f_g; + model.tensors["output_norm.bias"] = model.ln_f_b; + model.tensors["output.weight"] = model.lmh_g; + + for (int i = 0; i < n_block; ++i) { + auto & block = model.blocks[i]; + + std::string blocknamestart = "blk." + std::to_string(i) + "."; + + block.ln_1_g = get_tensor_ex(ctx, blocknamestart + "attn_norm.weight" ); + block.ln_1_b = get_tensor_ex(ctx, blocknamestart + "attn_norm.bias" ); + + block.c_attn_attn_w = get_tensor_ex(ctx, blocknamestart + "attn_qkv.weight" ); + block.c_attn_attn_b = get_tensor_ex(ctx ,blocknamestart + "attn_qkv.bias" ); + + block.c_attn_proj_w = get_tensor_ex(ctx, blocknamestart + "attn_output.weight" ); + block.c_attn_proj_b = get_tensor_ex(ctx, blocknamestart + "attn_output.bias" ); + + block.ln_2_g = get_tensor_ex(ctx, blocknamestart + "ffn_norm.weight" ); + block.ln_2_b = get_tensor_ex(ctx, blocknamestart + "ffn_norm.bias"); + + block.c_mlp_fc_w = get_tensor_ex(ctx, blocknamestart + "ffn_up.weight" ); + block.c_mlp_fc_b = get_tensor_ex(ctx, blocknamestart + "ffn_up.bias" ); + + block.c_mlp_proj_w = get_tensor_ex(ctx, blocknamestart + "ffn_down.weight" ); + block.c_mlp_proj_b = get_tensor_ex(ctx, blocknamestart + "ffn_down.bias" ); + + // map by name + model.tensors[blocknamestart + "attn_norm.weight"] = block.ln_1_g; + model.tensors[blocknamestart + "attn_norm.bias"] = block.ln_1_b; + + model.tensors[blocknamestart + "attn_qkv.weight"] = block.c_attn_attn_w; + model.tensors[blocknamestart + "attn_qkv.bias"] = block.c_attn_attn_b; + + model.tensors[blocknamestart + "attn_output.weight"] = block.c_attn_proj_w; + model.tensors[blocknamestart + "attn_output.bias"] = block.c_attn_proj_b; + + model.tensors[blocknamestart + "ffn_norm.weight"] = block.ln_2_g; + model.tensors[blocknamestart + "ffn_norm.bias"] = block.ln_2_b; + + model.tensors[blocknamestart + "ffn_up.weight"] = block.c_mlp_fc_w; + model.tensors[blocknamestart + "ffn_up.bias"] = block.c_mlp_fc_b; + + model.tensors[blocknamestart + "ffn_down.weight"] = block.c_mlp_proj_w; + model.tensors[blocknamestart + "ffn_down.bias"] = block.c_mlp_proj_b; + } + } + + // key + value memory + { + const auto & kvctx = model.kvctx; + const auto & hparams = model.hparams; + + const int n_embd = hparams.n_embd; + const int n_block = hparams.n_block; + const int n_ctx = hparams.n_ctx; + + const int64_t n_mem = n_block*n_ctx; + const int64_t n_elements = n_embd*n_mem; + + // create the ggml context + { + struct ggml_init_params params = { + /*.mem_size =*/ size_t(n_elements*4+ggml_tensor_overhead()*2), + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ false, + }; + + model.kvctx = ggml_init(params); + if (!model.kvctx) { + fprintf(stderr, "%s: kv ggml_init() failed\n", __func__); + return false; + } + + } + + + model.memory_k = ggml_new_tensor_1d(kvctx, GGML_TYPE_F16, n_elements); + model.memory_v = ggml_new_tensor_1d(kvctx, GGML_TYPE_F16, n_elements); + + 
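+
+        // rough sizing example (hypothetical model with n_embd = 2048,
+        // n_block = 24, n_ctx = 2048): n_mem = 24*2048 = 49152 positions,
+        // n_elements = 2048*49152 ~ 100.7M per tensor, so at 2 bytes per
+        // F16 element K + V together take about 2 * 192 MB = 384 MB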
+        const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);
+
+        printf("%s: memory_size = %8.2f MB, n_mem = %" PRId64 "\n", __func__, memory_size/1024.0/1024.0, n_mem);
+    }
+
+    return true;
+}
+
+
+// feed-forward network
+ggml_tensor * gpt_neox_ff(
+        const gpt_neox_block & block,
+        ggml_context * ctx0,
+        ggml_tensor * inp) {
+
+    ggml_tensor * cur = ggml_norm(ctx0, inp);
+
+    cur = ggml_add(ctx0, ggml_mul(ctx0, ggml_repeat(ctx0, block.ln_2_g, cur), cur), ggml_repeat(ctx0, block.ln_2_b, cur));
+    cur = ggml_mul_mat(ctx0, block.c_mlp_fc_w, cur);
+    cur = ggml_add(ctx0, ggml_repeat(ctx0, block.c_mlp_fc_b, cur), cur);
+
+    // GELU activation
+    cur = ggml_gelu(ctx0, cur);
+
+    // projection
+    // cur = proj_w*cur + proj_b
+    cur = ggml_mul_mat(ctx0, block.c_mlp_proj_w, cur);
+
+    cur = ggml_add(ctx0, ggml_repeat(ctx0, block.c_mlp_proj_b, cur), cur);
+    return cur;
+}
+
+// evaluate the transformer
+//
+// - model:     the model
+// - n_threads: number of threads to use
+// - n_past:    the context size so far
+// - embd_inp:  the embeddings of the tokens in the context
+// - embd_w:    the predicted logits for the next token
+//
+bool gpt_neox_eval(
+        const gpt_neox_model & model,
+        const int n_threads,
+        const int n_past,
+        const std::vector<gpt2bpe_vocab::id> & embd_inp,
+              std::vector<float>             & embd_w,
+        size_t                               & mem_per_token) {
+    const int N = embd_inp.size();
+
+    const auto & hparams = model.hparams;
+
+    const int n_embd = hparams.n_embd;
+    const int n_block = hparams.n_block;
+    const int n_ctx = hparams.n_ctx;
+    const int n_head = hparams.n_head;
+    const int n_vocab = hparams.n_vocab;
+    const int n_rot = hparams.n_rot;
+
+    static size_t buf_size = 256u*1024*1024;
+    static void * buf = malloc(buf_size);
+
+    // use 2 scratch buffers
+    // TODO: very hacky solution - reimplement in a more elegant way
+    static size_t scr0_size = 256u*1024*1024;
+    static void * scr0 = malloc(scr0_size);
+
+    static size_t scr1_size = 256u*1024*1024;
+    static void * scr1 = malloc(scr1_size);
+
+    if (mem_per_token > 0 && mem_per_token*N > buf_size) {
+        const size_t buf_size_new = 1.1*(mem_per_token*N); // add 10% to account for ggml object overhead
+        //printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__, buf_size, buf_size_new);
+
+        // reallocate
+        buf_size = buf_size_new;
+        buf = realloc(buf, buf_size);
+        if (buf == nullptr) {
+            fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size);
+            return false;
+        }
+    }
+
+    struct ggml_init_params params = {
+        /*.mem_size   =*/ buf_size,
+        /*.mem_buffer =*/ buf,
+        /*.no_alloc   =*/ false,
+    };
+
+    struct ggml_context * ctx0 = ggml_init(params);
+    struct ggml_cgraph gf = {};
+
+    struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
+    memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd));
+
+
+    // wte
+    struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.wte, embd);
+
+    for (int il = 0; il < n_block; ++il) {
+        struct ggml_tensor * cur;
+
+        ggml_set_scratch(ctx0, { 0, scr0_size, scr0, });
+
+        // self-attention
+        {
+            {
+                cur = ggml_norm(ctx0, inpL);
+
+                cur = ggml_add(ctx0,
+                        ggml_mul(ctx0, ggml_repeat(ctx0, model.blocks[il].ln_1_g, cur), cur),
+                        ggml_repeat(ctx0, model.blocks[il].ln_1_b, cur));
+            }
+
+            // compute QKV
+            {
+
+                cur = ggml_mul_mat(ctx0, model.blocks[il].c_attn_attn_w, cur);
+                cur = ggml_add(ctx0, ggml_repeat(ctx0, model.blocks[il].c_attn_attn_b, cur), cur);
+            }
+
+            struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd/n_head, n_head, N, cur->nb[1]/n_head, cur->nb[1], 0*sizeof(float)*n_embd/n_head));
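+            // the fused QKV matrix interleaves q/k/v per head: each head owns
+            // 3*(n_embd/n_head) consecutive values ordered q, k, v, hence the
+            // per-head stride cur->nb[1]/n_head and the byte offsets of
+            // 0/1/2 * sizeof(float)*n_embd/n_head used by these three views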
struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd/n_head, n_head, N, cur->nb[1]/n_head, cur->nb[1], 1*sizeof(float)*n_embd/n_head)); + struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd/n_head, n_head, N, cur->nb[1]/n_head, cur->nb[1], 2*sizeof(float)*n_embd/n_head)); + + // using mode = 2 for GPT-NeoX mode + Qcur = ggml_rope_inplace(ctx0, Qcur, n_past, n_rot, 2, 0); + Kcur = ggml_rope_inplace(ctx0, Kcur, n_past, n_rot, 2, 0); + + // store key and value to memory + { + Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, Vcur, n_embd, N)); + + struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past)); + struct ggml_tensor * v = ggml_view_2d(ctx0, model.memory_v, N, n_embd, + ( n_ctx)*ggml_element_size(model.memory_v), + (il*n_ctx)*ggml_element_size(model.memory_v)*n_embd + n_past*ggml_element_size(model.memory_v)); + + ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k)); + ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v)); + } + + // Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3) + struct ggml_tensor * Q = ggml_permute(ctx0, Qcur, 0, 2, 1, 3); + + // K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3) + struct ggml_tensor * K = + ggml_permute(ctx0, + ggml_reshape_3d(ctx0, + ggml_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_k)*n_embd), + n_embd/n_head, n_head, n_past + N), + 0, 2, 1, 3); + + // K * Q + struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q); + + // KQ_scaled = KQ / sqrt(n_embd/n_head) + struct ggml_tensor * KQ_scaled = + ggml_scale_inplace(ctx0, + KQ, + ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head)) + ); + + // KQ_masked = mask_past(KQ_scaled) + struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past); + + // KQ = soft_max(KQ_masked) + struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked); + + // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous() + struct ggml_tensor * V = + ggml_view_3d(ctx0, model.memory_v, + n_past + N, n_embd/n_head, n_head, + n_ctx*ggml_element_size(model.memory_v), + n_ctx*ggml_element_size(model.memory_v)*n_embd/n_head, + il*n_ctx*ggml_element_size(model.memory_v)*n_embd); + + // KQV = transpose(V) * KQ_soft_max + struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max); + + // KQV_merged = KQV.permute(0, 2, 1, 3) + struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3); + + // cur = KQV_merged.contiguous().view(n_embd, N) + cur = ggml_cpy(ctx0, KQV_merged, ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N)); + + // projection + { + cur = ggml_mul_mat(ctx0, model.blocks[il].c_attn_proj_w, cur); + cur = ggml_add(ctx0, ggml_repeat(ctx0, model.blocks[il].c_attn_proj_b, cur), cur); + } + } + + ggml_set_scratch(ctx0, { 0, scr1_size, scr1, }); + + if (hparams.par_res == 0) { + struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpL); + + cur = gpt_neox_ff(model.blocks[il], ctx0, inpFF); + + // input for next layer + inpL = ggml_add(ctx0, cur, inpFF); + } else { + struct ggml_tensor * inpFF = cur; + + // this is independent of the self-attention result, so it could be done in parallel to the self-attention + // note here we pass inpL instead of cur + cur = gpt_neox_ff(model.blocks[il], ctx0, inpL); + + // layer input + FF + cur = ggml_add(ctx0, cur, inpFF); + + // input for next layer + inpL = ggml_add(ctx0, cur, inpL); + } + } + + 
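+    // residual wiring recap: with par_res (the GPT-NeoX default) each block
+    // computes x_out = x + Attn(LN_1(x)) + FF(LN_2(x)); without it, the FF runs
+    // on inpFF = x + Attn(LN_1(x)) and the block returns inpFF + FF(LN_2(inpFF))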
    ggml_set_scratch(ctx0, { 0, scr0_size, scr0, });
+
+    // norm
+    {
+        inpL = ggml_norm(ctx0, inpL);
+
+        // inpL = ln_f_g*inpL + ln_f_b
+        inpL = ggml_add(ctx0,
+                ggml_mul(ctx0,
+                    ggml_repeat(ctx0, model.ln_f_g, inpL),
+                    inpL),
+                ggml_repeat(ctx0, model.ln_f_b, inpL));
+    }
+
+    ggml_set_scratch(ctx0, { 0, 0, nullptr, });
+
+    // lm_head
+    {
+        inpL = ggml_mul_mat(ctx0, model.lmh_g, inpL);
+
+        //inpL = ggml_add(ctx0,
+        //        ggml_repeat(ctx0, model.lmh_b, inpL),
+        //        inpL);
+    }
+
+    // logits -> probs
+    //inpL = ggml_soft_max_inplace(ctx0, inpL);
+
+    // run the computation
+    ggml_build_forward_expand(&gf, inpL);
+    ggml_graph_compute_with_ctx(ctx0, &gf, n_threads);
+
+    //if (n_past%100 == 0) {
+    //    ggml_graph_print   (&gf);
+    //    ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot");
+    //}
+
+    //embd_w.resize(n_vocab*N);
+    //memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N);
+
+    // return result for just the last token
+    embd_w.resize(n_vocab);
+    memcpy(embd_w.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
+
+    if (mem_per_token == 0) {
+        mem_per_token = ggml_used_mem(ctx0)/N;
+    }
+    //printf("used_mem = %zu\n", ggml_used_mem(ctx0));
+
+    ggml_free(ctx0);
+
+    return true;
+}
+
+int main(int argc, char ** argv) {
+    ggml_time_init();
+
+    const int64_t t_main_start_us = ggml_time_us();
+
+    gpt_params params;
+
+    if (gpt_params_parse(argc, argv, params) == false) {
+        return 1;
+    }
+
+    int64_t t_load_us = 0;
+
+    gpt2bpe_vocab vocab;
+    gpt_neox_model model;
+
+    // load the model
+    {
+        const int64_t t_start_us = ggml_time_us();
+
+        if (!gpt_neox_model_load(params.model, model, vocab)) {
+            fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str());
+            return 1;
+        }
+
+        t_load_us = ggml_time_us() - t_start_us;
+    }
+
+    if (params.seed < 0) {
+        params.seed = time(NULL);
+    }
+
+    if (params.top_k == 0) {
+        params.top_k = model.hparams.n_vocab;
+    }
+
+    printf("%s: seed           = %d\n",   __func__, params.seed);
+    printf("%s: temp           = %.3f\n", __func__, params.temp);
+    printf("%s: top_k          = %d\n",   __func__, params.top_k);
+    printf("%s: top_p          = %.3f\n", __func__, params.top_p);
+    printf("%s: repeat_last_n  = %d\n",   __func__, params.repeat_last_n);
+    printf("%s: repeat_penalty = %.3f\n", __func__, params.repeat_penalty);
+
+    std::mt19937 rng(params.seed);
+
+    if (params.prompt.empty()) {
+        params.prompt = "Once upon";
+    }
+
+    std::vector<gpt2bpe_vocab::id> last_n_tokens(model.hparams.n_ctx);
+    std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);
+
+    int n_past = 0;
+
+    int64_t t_sample_us  = 0;
+    int64_t t_predict_us = 0;
+
+    std::vector<float> logits;
+
+    // tokenize the prompt
+    std::vector<gpt2bpe_vocab::id> embd_inp = gpt2bpe_tokenize(vocab, params.prompt, false, false);
+
+    params.n_predict = std::min(params.n_predict, model.hparams.n_ctx - (int) embd_inp.size());
+
+    printf("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
+//  for (size_t i = 0; i < embd_inp.size(); i++) {
+//      printf("%s: token[%zu] = %6d, %s\n", __func__, i, embd_inp[i], vocab.id_to_token[embd_inp[i]].c_str());
+//  }
+
+    if (model.hparams.n_ctx < params.n_predict + (int) embd_inp.size()) {
+        params.n_predict = model.hparams.n_ctx - (int) embd_inp.size();
+    }
+
+    printf("%s: n_predict = %d\n", __func__, params.n_predict);
+    printf("\n");
+
+    std::vector<gpt2bpe_vocab::id> embd;
+
+    // determine the required inference memory per token:
+    size_t mem_per_token = 0;
+    gpt_neox_eval(model, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token);
+
+    for (size_t i = embd.size(); i < embd_inp.size() + params.n_predict; i++) {
+        // predict
+        if
(embd.size() > 0) { + const int64_t t_start_us = ggml_time_us(); + + if (!gpt_neox_eval(model, params.n_threads, n_past, embd, logits, mem_per_token)) { + printf("Failed to predict\n"); + return 1; + } + + t_predict_us += ggml_time_us() - t_start_us; + } + + n_past += embd.size(); + embd.clear(); + + if (i >= embd_inp.size()) { + // sample next token + const int top_k = params.top_k; + const float top_p = params.top_p; + const float temp = params.temp; + const int repeat_last_n = params.repeat_last_n; + const float repeat_penalty = params.repeat_penalty; + + const int n_vocab = model.hparams.n_vocab; + + gpt2bpe_vocab::id id = 0; + + { + const int64_t t_start_sample_us = ggml_time_us(); + + id = sample_top_k_top_p_repeat(vocab, logits.data() + (logits.size() - n_vocab), last_n_tokens.data(), last_n_tokens.size(), top_k, top_p, temp, repeat_last_n, repeat_penalty, rng); + + last_n_tokens.erase(last_n_tokens.begin()); + last_n_tokens.push_back(id); + + t_sample_us += ggml_time_us() - t_start_sample_us; + } + + // add it to the context + embd.push_back(id); + } else { + // if here, it means we are still processing the input prompt + for (size_t k = i; k < embd_inp.size(); k++) { + embd.push_back(embd_inp[k]); + if (embd.size() > params.n_batch) { + break; + } + } + i += embd.size() - 1; + } + + // display text + for (auto id : embd) { + printf("%s", vocab.id_to_token[id].c_str() ); + } + fflush(stdout); + + // end of text token + if (vocab.special_eos_id != -1 && embd.back() == vocab.special_eos_id) { + break; + } + } + + // report timing + { + const int64_t t_main_end_us = ggml_time_us(); + + printf("\n\n"); + printf("%s: mem per token = %8zu bytes\n", __func__, mem_per_token); + printf("%s: load time = %8.2f ms\n", __func__, t_load_us/1000.0f); + printf("%s: sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f); + printf("%s: predict time = %8.2f ms / %.2f ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past); + printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f); + } + + ggml_free(model.ctx); + + return 0; +} diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp index 266c8eab3..d11fff288 100755 --- a/examples/llama-bench/llama-bench.cpp +++ b/examples/llama-bench/llama-bench.cpp @@ -606,6 +606,8 @@ const std::string test::cpu_info = get_cpu_info(); const std::string test::gpu_info = get_gpu_info(); struct printer { + virtual ~printer() {} + FILE * fout; virtual void print_header(const cmd_params & params) { (void) params; }; virtual void print_test(const test & t) = 0; @@ -849,7 +851,7 @@ struct sql_printer : public printer { }; static void test_prompt(llama_context * ctx, int n_prompt, int n_past, int n_batch, int n_threads) { - std::vector tokens(n_batch, llama_token_bos()); + std::vector tokens(n_batch, llama_token_bos(ctx)); int n_processed = 0; while (n_processed < n_prompt) { int n_tokens = std::min(n_prompt - n_processed, n_batch); @@ -859,7 +861,7 @@ static void test_prompt(llama_context * ctx, int n_prompt, int n_past, int n_bat } static void test_gen(llama_context * ctx, int n_gen, int n_past, int n_threads) { - llama_token token = llama_token_bos(); + llama_token token = llama_token_bos(ctx); for (int i = 0; i < n_gen; i++) { llama_eval(ctx, &token, 1, n_past + i, n_threads); } diff --git a/examples/main/main.cpp b/examples/main/main.cpp index a632bea1c..388e1f7d7 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -143,7 +143,7 @@ int main(int argc, char ** 
argv) { { fprintf(stderr, "%s: testing memory usage for n_batch = %d, n_ctx = %d\n", __func__, params.n_batch, params.n_ctx); - const std::vector tmp(params.n_batch, llama_token_bos()); + const std::vector tmp(params.n_batch, llama_token_bos(ctx)); llama_eval(ctx, tmp.data(), tmp.size(), params.n_ctx, params.n_threads); } @@ -191,10 +191,6 @@ int main(int argc, char ** argv) { // tokenize the prompt std::vector embd_inp; - - // Add a space in front of the first character to match OG llama tokenizer behavior - params.prompt.insert(0, 1, ' '); - if (params.interactive_first || params.instruct || !params.prompt.empty() || session_tokens.empty()) { embd_inp = ::llama_tokenize(ctx, params.prompt, true); } else { @@ -270,15 +266,12 @@ int main(int argc, char ** argv) { params.interactive = true; } - // determine newline token - auto llama_token_newline = ::llama_tokenize(ctx, "\n", false); - if (params.verbose_prompt) { fprintf(stderr, "\n"); fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str()); fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size()); for (int i = 0; i < (int) embd_inp.size(); i++) { - fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_str(ctx, embd_inp[i])); + fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_str(ctx, embd_inp[i]).c_str()); } if (ctx_guidance) { @@ -286,14 +279,14 @@ int main(int argc, char ** argv) { fprintf(stderr, "%s: negative prompt: '%s'\n", __func__, params.cfg_negative_prompt.c_str()); fprintf(stderr, "%s: number of tokens in negative prompt = %zu\n", __func__, guidance_inp.size()); for (int i = 0; i < (int) guidance_inp.size(); i++) { - fprintf(stderr, "%6d -> '%s'\n", guidance_inp[i], llama_token_to_str(ctx, guidance_inp[i])); + fprintf(stderr, "%6d -> '%s'\n", guidance_inp[i], llama_token_to_str(ctx, guidance_inp[i]).c_str()); } } if (params.n_keep > 0) { fprintf(stderr, "%s: static prompt based on n_keep: '", __func__); for (int i = 0; i < params.n_keep; i++) { - fprintf(stderr, "%s", llama_token_to_str(ctx, embd_inp[i])); + fprintf(stderr, "%s", llama_token_to_str(ctx, embd_inp[i]).c_str()); } fprintf(stderr, "'\n"); } @@ -311,7 +304,7 @@ int main(int argc, char ** argv) { auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL { return (ctrl_type == CTRL_C_EVENT) ? 
(sigint_handler(SIGINT), true) : false; }; - SetConsoleCtrlHandler(static_cast(console_ctrl_handler), true); + SetConsoleCtrlHandler(reinterpret_cast(console_ctrl_handler), true); #endif fprintf(stderr, "%s: interactive mode on.\n", __func__); @@ -352,10 +345,9 @@ int main(int argc, char ** argv) { fprintf(stderr, "\n"); { - auto it = params.logit_bias.find(llama_token_eos()); + auto it = params.logit_bias.find(llama_token_eos(ctx)); if (it != params.logit_bias.end() && it->second == -INFINITY) { - fprintf(stderr, - "%s: warning: EOS token is disabled, which will cause most grammars to fail\n", __func__); + fprintf(stderr, "%s: warning: EOS token is disabled, which will cause most grammars to fail\n", __func__); } } @@ -405,7 +397,7 @@ int main(int argc, char ** argv) { // do one empty run to warm up the model { - const std::vector tmp = { llama_token_bos(), }; + const std::vector tmp = { llama_token_bos(ctx), }; llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads); llama_reset_timings(ctx); } @@ -589,7 +581,7 @@ int main(int argc, char ** argv) { } // Apply penalties - float nl_logit = logits[llama_token_nl()]; + float nl_logit = logits[llama_token_nl(ctx)]; auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), repeat_last_n), n_ctx); llama_sample_repetition_penalty(ctx, &candidates_p, last_n_tokens.data() + last_n_tokens.size() - last_n_repeat, @@ -598,7 +590,7 @@ int main(int argc, char ** argv) { last_n_tokens.data() + last_n_tokens.size() - last_n_repeat, last_n_repeat, alpha_frequency, alpha_presence); if (!penalize_nl) { - logits[llama_token_nl()] = nl_logit; + logits[llama_token_nl(ctx)] = nl_logit; } if (grammar != NULL) { @@ -662,7 +654,7 @@ int main(int argc, char ** argv) { // display text if (input_echo) { for (auto id : embd) { - printf("%s", llama_token_to_str(ctx, id)); + printf("%s", llama_token_to_str(ctx, id).c_str()); } fflush(stdout); } @@ -704,7 +696,7 @@ int main(int argc, char ** argv) { } // deal with end of text token in interactive mode - if (last_n_tokens.back() == llama_token_eos()) { + if (last_n_tokens.back() == llama_token_eos(ctx)) { if (params.interactive) { if (params.antiprompt.size() != 0) { // tokenize and inject first reverse prompt @@ -728,7 +720,7 @@ int main(int argc, char ** argv) { } if (params.input_prefix_bos) { - embd_inp.push_back(llama_token_bos()); + embd_inp.push_back(llama_token_bos(ctx)); } std::string buffer; @@ -782,8 +774,7 @@ int main(int argc, char ** argv) { if (grammar != NULL) { llama_grammar_free(grammar); - std::vector grammar_rules( - parsed_grammar.c_rules()); + std::vector grammar_rules( parsed_grammar.c_rules()); grammar = llama_grammar_init( grammar_rules.data(), grammar_rules.size(), parsed_grammar.symbol_ids.at("root")); @@ -794,7 +785,7 @@ int main(int argc, char ** argv) { } // end of text token - if (!embd.empty() && embd.back() == llama_token_eos() && !(params.instruct || params.interactive)) { + if (!embd.empty() && embd.back() == llama_token_eos(ctx) && !(params.instruct || params.interactive)) { fprintf(stderr, " [end of text]\n"); break; } diff --git a/examples/metal/metal.cpp b/examples/metal/metal.cpp index 7438defde..c05a4fa93 100644 --- a/examples/metal/metal.cpp +++ b/examples/metal/metal.cpp @@ -2,7 +2,7 @@ // // - First, export a LLaMA graph: // -// $ ./bin/main -m ../models/7B/ggml-model-q4_0.bin --export +// $ ./bin/main -m ../models/7B/ggml-model-q4_0.gguf --export // // - Run this tool to evaluate the exported graph: // diff --git a/examples/perplexity/perplexity.cpp 
b/examples/perplexity/perplexity.cpp index 2409db69f..f3c045aec 100644 --- a/examples/perplexity/perplexity.cpp +++ b/examples/perplexity/perplexity.cpp @@ -64,7 +64,7 @@ void perplexity(llama_context * ctx, const gpt_params & params) { // add BOS token for the first batch of each chunk if (j == 0) { - tokens[batch_start] = llama_token_bos(); + tokens[batch_start] = llama_token_bos(ctx); } if (llama_eval(ctx, tokens.data() + batch_start, batch_size, j * n_batch, params.n_threads)) { diff --git a/examples/quantize-stats/quantize-stats.cpp b/examples/quantize-stats/quantize-stats.cpp index 6aa06ec8f..06ce18f09 100644 --- a/examples/quantize-stats/quantize-stats.cpp +++ b/examples/quantize-stats/quantize-stats.cpp @@ -24,7 +24,7 @@ #endif struct quantize_stats_params { - std::string model = "models/7B/ggml-model-f16.bin"; + std::string model = "models/7B/ggml-model-f16.gguf"; bool verbose = false; bool per_layer_stats = false; bool print_histogram = false; diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp index 744f549c5..f628d0642 100644 --- a/examples/quantize/quantize.cpp +++ b/examples/quantize/quantize.cpp @@ -68,10 +68,10 @@ bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftype, std: } // usage: -// ./quantize [--allow-requantize] [--leave-output-tensor] models/llama/ggml-model.bin [models/llama/ggml-model-quant.bin] type [nthreads] +// ./quantize [--allow-requantize] [--leave-output-tensor] models/llama/ggml-model.gguf [models/llama/ggml-model-quant.gguf] type [nthreads] // void usage(const char * executable) { - fprintf(stderr, "usage: %s [--help] [--allow-requantize] [--leave-output-tensor] model-f32.bin [model-quant.bin] type [nthreads]\n\n", executable); + fprintf(stderr, "usage: %s [--help] [--allow-requantize] [--leave-output-tensor] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable); fprintf(stderr, " --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n"); fprintf(stderr, " --leave-output-tensor: Will leave output.weight un(re)quantized. 
Increases model size but may also increase quality, especially when requantizing\n");
     fprintf(stderr, "\nAllowed quantization types:\n");
@@ -118,8 +118,8 @@ int main(int argc, char ** argv) {
         if (pos != std::string::npos) {
             fpath = fname_inp.substr(0, pos + 1);
         }
-        // export as [inp path]/ggml-model-[ftype].bin
-        fname_out = fpath + "ggml-model-" + ftype_str + ".bin";
+        // export as [inp path]/ggml-model-[ftype].gguf
+        fname_out = fpath + "ggml-model-" + ftype_str + ".gguf";
         arg_idx++;
     } else {
diff --git a/examples/save-load-state/save-load-state.cpp b/examples/save-load-state/save-load-state.cpp
index 61c71c358..3db61b754 100644
--- a/examples/save-load-state/save-load-state.cpp
+++ b/examples/save-load-state/save-load-state.cpp
@@ -26,7 +26,6 @@ int main(int argc, char ** argv) {
     auto lparams = llama_context_default_params();
     lparams.n_ctx = params.n_ctx;
-    lparams.n_gqa = params.n_gqa;
     lparams.seed = params.seed;
     lparams.f16_kv = params.memory_f16;
     lparams.use_mmap = params.use_mmap;
@@ -45,9 +44,8 @@ int main(int argc, char ** argv) {
         llama_free_model(model);
         return 1;
     }
-    auto tokens = std::vector<llama_token>(params.n_ctx);
-    auto n_prompt_tokens = llama_tokenize(ctx, params.prompt.c_str(), tokens.data(), int(tokens.size()), true);
-
+    auto tokens = llama_tokenize(ctx, params.prompt.c_str(), true);
+    auto n_prompt_tokens = tokens.size();
     if (n_prompt_tokens < 1) {
         fprintf(stderr, "%s : failed to tokenize prompt\n", __func__);
         llama_free(ctx);
@@ -92,7 +90,7 @@ int main(int argc, char ** argv) {
         auto next_token_str = llama_token_to_str(ctx, next_token);
         last_n_tokens_data.push_back(next_token);
-        printf("%s", next_token_str);
+        printf("%s", next_token_str.c_str());
         if (llama_eval(ctx, &next_token, 1, n_past, params.n_threads)) {
             fprintf(stderr, "\n%s : failed to evaluate\n", __func__);
             llama_free(ctx);
@@ -152,7 +150,7 @@ int main(int argc, char ** argv) {
         auto next_token_str = llama_token_to_str(ctx2, next_token);
         last_n_tokens_data.push_back(next_token);
-        printf("%s", next_token_str);
+        printf("%s", next_token_str.c_str());
         if (llama_eval(ctx2, &next_token, 1, n_past, params.n_threads)) {
             fprintf(stderr, "\n%s : failed to evaluate\n", __func__);
             llama_free(ctx2);
diff --git a/examples/server/README.md b/examples/server/README.md
index 1559dd3f2..4d97db2e4 100644
--- a/examples/server/README.md
+++ b/examples/server/README.md
@@ -5,7 +5,7 @@ This example demonstrates a simple HTTP API server and a simple web front end to
 Command line options:
 
 - `--threads N`, `-t N`: Set the number of threads to use during computation.
-- `-m FNAME`, `--model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.bin`).
+- `-m FNAME`, `--model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.gguf`).
 - `-m ALIAS`, `--alias ALIAS`: Set an alias for the model. The alias will be returned in API responses.
 - `-c N`, `--ctx-size N`: Set the size of the prompt context. The default is 512, but LLaMA models were built with a context of 2048, which will provide better results for longer input/inference. The size may differ in other models, for example, baichuan models were built with a context of 4096.
 - `-ngl N`, `--n-gpu-layers N`: When compiled with appropriate support (currently CLBlast or cuBLAS), this option allows offloading some layers to the GPU for computation. Generally results in increased performance.
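For example, assuming a build with GPU support, the options above combine as follows (the layer count is illustrative; pick a value that fits your card):

```bash
./server -m models/7B/ggml-model.gguf -c 2048 -ngl 32
```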
@@ -48,15 +48,14 @@ To get started right away, run the following command, making sure to use the cor ### Unix-based systems (Linux, macOS, etc.): ```bash -./server -m models/7B/ggml-model.bin -c 2048 +./server -m models/7B/ggml-model.gguf -c 2048 ``` ### Windows: ```powershell -server.exe -m models\7B\ggml-model.bin -c 2048 +server.exe -m models\7B\ggml-model.gguf -c 2048 ``` - The above command will start a server that by default listens on `127.0.0.1:8080`. You can consume the endpoints with Postman or NodeJS with axios library. You can visit the web front end at the same url. diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 99660455a..a04f1910c 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -279,7 +279,7 @@ struct llama_server_context grammar_parser::print_grammar(stderr, parsed_grammar); { - auto it = params.logit_bias.find(llama_token_eos()); + auto it = params.logit_bias.find(llama_token_eos(ctx)); if (it != params.logit_bias.end() && it->second == -INFINITY) { LOG_WARNING("EOS token is disabled, which will cause most grammars to fail", {}); } @@ -402,7 +402,7 @@ struct llama_server_context if (params.n_predict == 0) { has_next_token = false; - result.tok = llama_token_eos(); + result.tok = llama_token_eos(ctx); return result; } @@ -442,7 +442,7 @@ struct llama_server_context llama_token_data_array candidates_p = {candidates.data(), candidates.size(), false}; // Apply penalties - float nl_logit = logits[llama_token_nl()]; + float nl_logit = logits[llama_token_nl(ctx)]; auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), repeat_last_n), params.n_ctx); llama_sample_repetition_penalty(ctx, &candidates_p, last_n_tokens.data() + last_n_tokens.size() - last_n_repeat, @@ -452,7 +452,7 @@ struct llama_server_context last_n_repeat, alpha_frequency, alpha_presence); if (!penalize_nl) { - logits[llama_token_nl()] = nl_logit; + logits[llama_token_nl(ctx)] = nl_logit; } if (grammar != nullptr) { @@ -515,7 +515,7 @@ struct llama_server_context // decrement remaining sampling budget --n_remain; - if (!embd.empty() && embd.back() == llama_token_eos()) + if (!embd.empty() && embd.back() == llama_token_eos(ctx)) { // stopping_word = llama_token_to_str(ctx, embd.back()); has_next_token = false; @@ -652,8 +652,6 @@ static void server_print_usage(const char *argv0, const gpt_params ¶ms, fprintf(stdout, " -v, --verbose verbose output (default: %s)\n", server_verbose ? "enabled" : "disabled"); fprintf(stdout, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads); fprintf(stdout, " -c N, --ctx-size N size of the prompt context (default: %d)\n", params.n_ctx); - fprintf(stdout, " -gqa N, --gqa N grouped-query attention factor (TEMP!!! use 8 for LLaMAv2 70B) (default: %d)\n", params.n_gqa); - fprintf(stdout, " -eps N, --rms-norm-eps N rms norm eps (TEMP!!! 
use 1e-5 for LLaMAv2) (default: %.1e)\n", params.rms_norm_eps); fprintf(stdout, " --rope-freq-base N RoPE base frequency (default: %.1f)\n", params.rope_freq_base); fprintf(stdout, " --rope-freq-scale N RoPE frequency scaling factor (default: %g)\n", params.rope_freq_scale); fprintf(stdout, " -b N, --batch-size N batch size for prompt processing (default: %d)\n", params.n_batch); @@ -774,23 +772,6 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, } params.n_ctx = std::stoi(argv[i]); } - else if (arg == "-gqa" || arg == "--gqa") - { - if (++i >= argc) - { - invalid_param = true; - break; - } - params.n_gqa = std::stoi(argv[i]); - } - else if (arg == "-eps" || arg == "--rms-norm-eps") { - if (++i >= argc) - { - invalid_param = true; - break; - } - params.rms_norm_eps = std::stof(argv[i]); - } else if (arg == "--rope-freq-base") { if (++i >= argc) @@ -968,7 +949,7 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, static json format_generation_settings(llama_server_context &llama) { - const auto eos_bias = llama.params.logit_bias.find(llama_token_eos()); + const auto eos_bias = llama.params.logit_bias.find(llama_token_eos(llama.ctx)); const bool ignore_eos = eos_bias != llama.params.logit_bias.end() && eos_bias->second < 0.0f && std::isinf(eos_bias->second); @@ -1103,7 +1084,7 @@ static void parse_options_completion(const json &body, llama_server_context &lla llama.params.logit_bias.clear(); if (body.value("ignore_eos", false)) { - llama.params.logit_bias[llama_token_eos()] = -INFINITY; + llama.params.logit_bias[llama_token_eos(llama.ctx)] = -INFINITY; } const auto &logit_bias = body.find("logit_bias"); diff --git a/examples/simple/simple.cpp b/examples/simple/simple.cpp index 97137a658..132f7fbf9 100644 --- a/examples/simple/simple.cpp +++ b/examples/simple/simple.cpp @@ -2,180 +2,129 @@ #define _GNU_SOURCE #endif -#include "common.h" -#include "llama.h" #include "build-info.h" -#include -#include +#include "common.h" +#include "llama.h" + #include #include -#include -#include -#include -#include #include #include -#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) -#include -#include -#elif defined (_WIN32) -#define WIN32_LEAN_AND_MEAN -#define NOMINMAX -#include -#include -#endif - - - -int main(int argc, char ** argv) -{ +int main(int argc, char ** argv) { gpt_params params; - //--------------------------------- - // Print help : - //--------------------------------- - - if ( argc == 1 || argv[1][0] == '-' ) - { - printf( "usage: %s MODEL_PATH [PROMPT]\n" , argv[0] ); + if (argc == 1 || argv[1][0] == '-') { + printf("usage: %s MODEL_PATH [PROMPT]\n" , argv[0]); return 1 ; } - //--------------------------------- - // Load parameters : - //--------------------------------- - - if ( argc >= 2 ) - { + if (argc >= 2) { params.model = argv[1]; } - if ( argc >= 3 ) - { + if (argc >= 3) { params.prompt = argv[2]; } - if ( params.prompt.empty() ) - { + if (params.prompt.empty()) { params.prompt = "Hello my name is"; } - //--------------------------------- - // Init LLM : - //--------------------------------- + // init LLM llama_backend_init(params.numa); - llama_model * model; - llama_context * ctx; + llama_context_params ctx_params = llama_context_default_params(); - std::tie(model, ctx) = llama_init_from_gpt_params( params ); + llama_model * model = llama_load_model_from_file(params.model.c_str(), ctx_params); - if ( model == NULL ) - { - fprintf( stderr , "%s: error: unable to load model\n" , __func__ ); + if 
(model == NULL) { + fprintf(stderr , "%s: error: unable to load model\n" , __func__); return 1; } - //--------------------------------- - // Tokenize the prompt : - //--------------------------------- + llama_context * ctx = llama_new_context_with_model(model, ctx_params); + + // tokenize the prompt std::vector tokens_list; - tokens_list = ::llama_tokenize( ctx , params.prompt , true ); + tokens_list = ::llama_tokenize(ctx, params.prompt, true); - const int max_context_size = llama_n_ctx( ctx ); - const int max_tokens_list_size = max_context_size - 4 ; + const int max_context_size = llama_n_ctx(ctx); + const int max_tokens_list_size = max_context_size - 4; - if ( (int)tokens_list.size() > max_tokens_list_size ) - { - fprintf( stderr , "%s: error: prompt too long (%d tokens, max %d)\n" , - __func__ , (int)tokens_list.size() , max_tokens_list_size ); + if ((int) tokens_list.size() > max_tokens_list_size) { + fprintf(stderr, "%s: error: prompt too long (%d tokens, max %d)\n", __func__, (int) tokens_list.size(), max_tokens_list_size); return 1; } - fprintf( stderr, "\n\n" ); + fprintf(stderr, "\n\n"); - // Print the tokens from the prompt : - - for( auto id : tokens_list ) - { - printf( "%s" , llama_token_to_str( ctx , id ) ); + for (auto id : tokens_list) { + fprintf(stderr, "%s", llama_token_to_str(ctx, id).c_str()); } - fflush(stdout); + fflush(stderr); - - //--------------------------------- - // Main prediction loop : - //--------------------------------- + // main loop // The LLM keeps a contextual cache memory of previous token evaluation. // Usually, once this cache is full, it is required to recompute a compressed context based on previous // tokens (see "infinite text generation via context swapping" in the main example), but in this minimalist // example, we will just stop the loop once this cache is full or once an end of stream is detected. 
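+// note: llama_get_kv_cache_token_count(ctx) is what the calls below pass as n_past,
+// so each llama_eval() appends the freshly evaluated tokens to the cache and the
+// loop naturally advances one sampled token at a time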
- while ( llama_get_kv_cache_token_count( ctx ) < max_context_size ) - { - //--------------------------------- - // Evaluate the tokens : - //--------------------------------- + const int n_gen = std::min(32, max_context_size); - if ( llama_eval( ctx , tokens_list.data() , int(tokens_list.size()) , llama_get_kv_cache_token_count( ctx ) , params.n_threads ) ) - { - fprintf( stderr, "%s : failed to eval\n" , __func__ ); + while (llama_get_kv_cache_token_count(ctx) < n_gen) { + // evaluate the transformer + + if (llama_eval(ctx, tokens_list.data(), int(tokens_list.size()), llama_get_kv_cache_token_count(ctx), params.n_threads)) { + fprintf(stderr, "%s : failed to eval\n", __func__); return 1; } tokens_list.clear(); - //--------------------------------- - // Select the best prediction : - //--------------------------------- + // sample the next token llama_token new_token_id = 0; - auto logits = llama_get_logits( ctx ); - auto n_vocab = llama_n_vocab( ctx ); // the size of the LLM vocabulary (in tokens) + auto logits = llama_get_logits(ctx); + auto n_vocab = llama_n_vocab(ctx); std::vector candidates; - candidates.reserve( n_vocab ); + candidates.reserve(n_vocab); - for( llama_token token_id = 0 ; token_id < n_vocab ; token_id++ ) - { - candidates.emplace_back( llama_token_data{ token_id , logits[ token_id ] , 0.0f } ); + for (llama_token token_id = 0; token_id < n_vocab; token_id++) { + candidates.emplace_back(llama_token_data{ token_id, logits[token_id], 0.0f }); } llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false }; - // Select it using the "Greedy sampling" method : - new_token_id = llama_sample_token_greedy( ctx , &candidates_p ); - + new_token_id = llama_sample_token_greedy(ctx , &candidates_p); // is it an end of stream ? 
- if ( new_token_id == llama_token_eos() ) - { + if (new_token_id == llama_token_eos(ctx)) { fprintf(stderr, " [end of text]\n"); break; } - // Print the new token : - printf( "%s" , llama_token_to_str( ctx , new_token_id ) ); - fflush( stdout ); + // print the new token : + printf("%s", llama_token_to_str(ctx, new_token_id).c_str()); + fflush(stdout); - // Push this new token for next evaluation : - tokens_list.push_back( new_token_id ); + // push this new token for next evaluation + tokens_list.push_back(new_token_id); + } - } // wend of main loop - - llama_free( ctx ); - llama_free_model( model ); + llama_free(ctx); + llama_free_model(model); llama_backend_free(); + fprintf(stderr, "\n\n"); + return 0; } - -// EOF diff --git a/examples/train-text-from-scratch/train-text-from-scratch.cpp b/examples/train-text-from-scratch/train-text-from-scratch.cpp index 54dc2beed..31d6620a2 100644 --- a/examples/train-text-from-scratch/train-text-from-scratch.cpp +++ b/examples/train-text-from-scratch/train-text-from-scratch.cpp @@ -1,4 +1,5 @@ #include "ggml.h" +#include "common.h" #include "llama.h" #include #include @@ -16,7 +17,7 @@ #pragma warning(disable: 4244 4267) // possible loss of data #endif -static const float rms_norm_eps = LLAMA_DEFAULT_RMS_EPS; +static const float rms_norm_eps = 1e-5f; struct random_normal_distribution { std::mt19937 gen; @@ -169,14 +170,16 @@ struct ggml_tensor * randomize_tensor_uniform(struct ggml_tensor * tensor, struc struct llama_vocab { using id = int32_t; using token = std::string; + using ttype = llama_token_type; - struct token_score { - token tok; + struct token_data { + token text; float score; + ttype type; }; std::unordered_map token_to_id; - std::vector id_to_token; + std::vector id_to_token; }; struct my_llama_hparams { @@ -1961,7 +1964,7 @@ void print_matrix(struct ggml_tensor * probs) { void print_token(struct llama_context * ctx, llama_token token) { - printf("%s", llama_token_to_str(ctx, token)); + printf("%s", llama_token_to_str(ctx, token).c_str()); } void print_tokens(struct llama_context* ctx, struct ggml_tensor * tokens) { @@ -1995,7 +1998,7 @@ void print_tokens_batch(struct llama_context* ctx, struct ggml_tensor * tokens) } } -void get_example_targets(const int * train_samples, size_t n_train_samples, const llama_token * train_data, size_t n_train_data, int example_id, struct ggml_tensor * tokens_input, struct ggml_tensor * target_logits, struct ggml_tensor * target_probs) { +void get_example_targets(struct llama_context * lctx, const int * train_samples, size_t n_train_samples, const llama_token * train_data, size_t n_train_data, int example_id, struct ggml_tensor * tokens_input, struct ggml_tensor * target_logits, struct ggml_tensor * target_probs) { int n_tokens = tokens_input->ne[0]; int n_vocab = target_logits->ne[0]; @@ -2004,7 +2007,7 @@ void get_example_targets(const int * train_samples, size_t n_train_samples, cons ggml_set_f32(target_logits, -1.0f/n_vocab); ggml_set_f32(target_probs, 0.0f); - ggml_set_i32_1d(tokens_input, 0, llama_token_bos()); + ggml_set_i32_1d(tokens_input, 0, llama_token_bos(lctx)); for (int i=1; in_dims == 2); GGML_ASSERT(target_logits->n_dims == 3); GGML_ASSERT(target_probs->n_dims == 3); @@ -2035,7 +2038,7 @@ void get_example_targets_batch(struct llama_context * /*lctx*/, const int * trai size_t sample = train_samples[(example_id*n_batch + k) % n_train_samples]; GGML_ASSERT(sample+n_tokens-1 < n_train_data); - set_i32_2d(tokens_input, 0, k, llama_token_bos()); + set_i32_2d(tokens_input, 0, k, 
llama_token_bos(lctx)); for (int i=1; i= 0) { - out.resize(n_tokens); + int n_tokens = llama_tokenize(lctx, buf.data(), out.data(), out.size(), false); + if (n_tokens < 0) { + out.resize(-n_tokens); + llama_tokenize(lctx, buf.data(), out.data(), out.size(), false); } bool verify = false; @@ -2200,17 +2202,17 @@ int tokenize_file(struct llama_context * lctx, const char * filename, std::vecto const char * in = buf.data(); const char * end = buf.data() + buf.size(); for (int i = 0; i < (int) out.size(); ++i) { - const char * s = llama_token_to_str(lctx, out[i]); - int len = strlen(s); + std::string s = llama_token_to_str(lctx, out[i]); + int len = s.length(); if (in >= end) { printf("%s: unexpected end of original text.\n", __func__); break; } - const bool matches = (strncmp(in, s, len) == 0); + const bool matches = (strncmp(in, s.c_str(), len) == 0); if (matches) { in += len; } else { - printf("%s: mismatch: expected '%s', but got '%s'\n", __func__, std::string(in, len).c_str(), s); + printf("%s: mismatch: expected '%s', but got '%s'\n", __func__, std::string(in, len).c_str(), s.c_str()); } } } @@ -2294,7 +2296,7 @@ llama_token sample(struct my_llama_sampler * sampler, float * logits, const llam const auto params = sampler->params; // Apply penalties - const float nl_logit = logits[llama_token_nl()]; + const float nl_logit = logits[llama_token_nl(ctx)]; const int n_last = std::min(std::min(n_last_tokens, params.repeat_last_n), sampler->n_ctx); @@ -2313,7 +2315,7 @@ llama_token sample(struct my_llama_sampler * sampler, float * logits, const llam params.alpha_presence); if (!params.penalize_nl) { - logits[llama_token_nl()] = nl_logit; + logits[llama_token_nl(ctx)] = nl_logit; } llama_token token = 0; @@ -2612,42 +2614,45 @@ void save_as_llama_model(struct llama_vocab * vocab, struct my_llama_model * mod return; } - // write_magic - file.write_u32(LLAMA_FILE_MAGIC); // magic - file.write_u32(LLAMA_FILE_VERSION); // version - // write_hparams - file.write_u32(model->hparams.n_vocab); - file.write_u32(model->hparams.n_embd); - file.write_u32(model->hparams.n_mult); - file.write_u32(model->hparams.n_head); - file.write_u32(model->hparams.n_layer); - file.write_u32(model->hparams.n_rot); - file.write_u32(LLAMA_FTYPE_ALL_F32); - // write_vocab - uint32_t n_vocab = model->hparams.n_vocab; - for (uint32_t i = 0; i < n_vocab; i++) { - const auto & token_score = vocab->id_to_token.at(i); - file.write_u32((uint32_t) token_score.tok.size()); - file.write_raw(token_score.tok.data(), token_score.tok.size()); - file.write_raw(&token_score.score, sizeof(token_score.score)); - } - // write tensors - write_tensor(&file, model->tok_embeddings); - write_tensor(&file, model->norm); - write_tensor(&file, model->output); - for (uint32_t i = 0; i < model->hparams.n_layer; ++i) { - auto & layer = model->layers[i]; - - write_tensor(&file, layer.attention_norm); - write_tensor(&file, layer.wq); - write_tensor(&file, layer.wk); - write_tensor(&file, layer.wv); - write_tensor(&file, layer.wo); - write_tensor(&file, layer.ffn_norm); - write_tensor(&file, layer.w1); - write_tensor(&file, layer.w2); - write_tensor(&file, layer.w3); - } +#pragma message("TODO: implement file saving using gguf") + (void) vocab; + (void) model; +// // write_magic +// file.write_u32(LLAMA_FILE_MAGIC); // magic +// file.write_u32(LLAMA_FILE_VERSION); // version +// // write_hparams +// file.write_u32(model->hparams.n_vocab); +// file.write_u32(model->hparams.n_embd); +// file.write_u32(model->hparams.n_mult); +// 
file.write_u32(model->hparams.n_head); +// file.write_u32(model->hparams.n_layer); +// file.write_u32(model->hparams.n_rot); +// file.write_u32(LLAMA_FTYPE_ALL_F32); +// // write_vocab +// uint32_t n_vocab = model->hparams.n_vocab; +// for (uint32_t i = 0; i < n_vocab; i++) { +// const auto & token_data = vocab->id_to_token.at(i); +// file.write_u32((uint32_t) token_data.tok.size()); +// file.write_raw(token_data.tok.data(), token_data.tok.size()); +// file.write_raw(&token_data.score, sizeof(token_data.score)); +// } +// // write tensors +// write_tensor(&file, model->tok_embeddings); +// write_tensor(&file, model->norm); +// write_tensor(&file, model->output); +// for (uint32_t i = 0; i < model->hparams.n_layer; ++i) { +// auto & layer = model->layers[i]; +// +// write_tensor(&file, layer.attention_norm); +// write_tensor(&file, layer.wq); +// write_tensor(&file, layer.wk); +// write_tensor(&file, layer.wv); +// write_tensor(&file, layer.wo); +// write_tensor(&file, layer.ffn_norm); +// write_tensor(&file, layer.w1); +// write_tensor(&file, layer.w2); +// write_tensor(&file, layer.w3); +// } } float cosine_decay(const int decay_steps, const float alpha, int step) { @@ -3052,20 +3057,13 @@ int main(int argc, char ** argv) { struct llama_vocab vocab; { - std::vector strings; - std::vector scores; - int n_vocab = llama_n_vocab(lctx); - strings.resize(n_vocab, NULL); - scores.resize(n_vocab, 0); - n_vocab = llama_get_vocab(lctx, strings.data(), scores.data(), n_vocab); - GGML_ASSERT(n_vocab == llama_n_vocab(lctx)); + const int n_vocab = llama_n_vocab(lctx); vocab.id_to_token.resize(n_vocab); for (int i=0; i train_samples; train_samples.push_back(0); for (int i = 1; i < (int) train_tokens.size() - n_tokens; ++i) { - if (!params.samples_start_after_nl || (train_tokens[i-1] == llama_token_nl())) { + if (!params.samples_start_after_nl || (train_tokens[i-1] == llama_token_nl(lctx))) { train_samples.push_back(i); } } @@ -3338,7 +3336,7 @@ int main(int argc, char ** argv) { struct ggml_tensor * target_logits = ggml_new_tensor_2d(model.ctx, GGML_TYPE_F32, n_vocab, n_tokens); struct ggml_tensor * target_probs = ggml_new_tensor_2d(model.ctx, GGML_TYPE_F32, n_vocab, n_tokens); - get_example_targets(train_samples.data(), train_samples.size(), train_tokens.data(), train_tokens.size(), rand()%train_samples.size(), tokens_input, target_logits, target_probs); + get_example_targets(lctx, train_samples.data(), train_samples.size(), train_tokens.data(), train_tokens.size(), rand()%train_samples.size(), tokens_input, target_logits, target_probs); for (int i=sample_ctx; in_cb = n_cb; } diff --git a/ggml.c b/ggml.c index 44c43b424..c917d73c7 100644 --- a/ggml.c +++ b/ggml.c @@ -213,10 +213,10 @@ inline static void * ggml_aligned_malloc(size_t size) { error_desc = "insufficient memory"; break; } - GGML_PRINT("%s: %s (attempted to allocate %6.2f MB)\n", - __func__, error_desc, size/(1024.0*1024.0)); + GGML_PRINT("%s: %s (attempted to allocate %6.2f MB)\n", __func__, error_desc, size/(1024.0*1024.0)); return NULL; } + return aligned_memory; } #define GGML_ALIGNED_MALLOC(size) ggml_aligned_malloc(size) @@ -4091,7 +4091,11 @@ size_t ggml_nbytes(const struct ggml_tensor * tensor) { // // is enough, but just in case, adding the second part - return GGML_PAD(MAX(tensor->ne[3]*tensor->nb[3], ggml_nelements(tensor)*ggml_type_size(tensor->type))/ggml_blck_size(tensor->type), GGML_MEM_ALIGN); + return MAX(tensor->ne[3]*tensor->nb[3], (ggml_nelements(tensor)*ggml_type_size(tensor->type))/ggml_blck_size(tensor->type)); +} + 
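+// same as ggml_nbytes() but rounded up to GGML_MEM_ALIGN - ggml_graph_export()
+// below switches to this when summing the sizes of intermediate results, so the
+// exported eval buffer stays large enough once each tensor is aligned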
+size_t ggml_nbytes_pad(const struct ggml_tensor * tensor) { + return GGML_PAD(ggml_nbytes(tensor), GGML_MEM_ALIGN); } size_t ggml_nbytes_split(const struct ggml_tensor * tensor, int nrows_split) { @@ -9118,6 +9122,8 @@ static void ggml_compute_forward_mul( const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) { + GGML_ASSERT(src1->type == GGML_TYPE_F32 && "only f32 src1 supported for now"); + switch (src0->type) { case GGML_TYPE_F32: { @@ -16881,7 +16887,7 @@ void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) { // compute size of intermediate results // TODO: does not take into account scratch buffers !!!! for (int i = 0; i < cgraph->n_nodes; ++i) { - size_eval += ggml_nbytes(cgraph->nodes[i]); + size_eval += ggml_nbytes_pad(cgraph->nodes[i]); } // print @@ -18542,6 +18548,1005 @@ size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, i //////////////////////////////////////////////////////////////////////////////// +struct gguf_str { + uint32_t n; + char * data; +}; + +static const size_t GGUF_TYPE_SIZE[GGUF_TYPE_COUNT] = { + [GGUF_TYPE_UINT8] = sizeof(uint8_t), + [GGUF_TYPE_INT8] = sizeof(int8_t), + [GGUF_TYPE_UINT16] = sizeof(uint16_t), + [GGUF_TYPE_INT16] = sizeof(int16_t), + [GGUF_TYPE_UINT32] = sizeof(uint32_t), + [GGUF_TYPE_INT32] = sizeof(int32_t), + [GGUF_TYPE_FLOAT32] = sizeof(float), + [GGUF_TYPE_BOOL] = sizeof(bool), + [GGUF_TYPE_STRING] = sizeof(struct gguf_str), + [GGUF_TYPE_ARRAY] = 0, // undefined +}; +static_assert(GGUF_TYPE_COUNT == 10, "GGUF_TYPE_COUNT != 10"); + +static const char * GGUF_TYPE_NAME[GGUF_TYPE_COUNT] = { + [GGUF_TYPE_UINT8] = "u8", + [GGUF_TYPE_INT8] = "i8", + [GGUF_TYPE_UINT16] = "u16", + [GGUF_TYPE_INT16] = "i16", + [GGUF_TYPE_UINT32] = "u32", + [GGUF_TYPE_INT32] = "i32", + [GGUF_TYPE_FLOAT32] = "f32", + [GGUF_TYPE_BOOL] = "bool", + [GGUF_TYPE_STRING] = "str", + [GGUF_TYPE_ARRAY] = "arr", +}; +static_assert(GGUF_TYPE_COUNT == 10, "GGUF_TYPE_COUNT != 10"); + +union gguf_value { + uint8_t uint8; + int8_t int8; + uint16_t uint16; + int16_t int16; + uint32_t uint32; + int32_t int32; + float float32; + bool bool_; + + struct gguf_str str; + + struct { + enum gguf_type type; + + uint32_t n; + void * data; + } arr; +}; + +struct gguf_kv { + struct gguf_str key; + + uint32_t n_bytes; // TODO: is this actually needed? + + enum gguf_type type; + union gguf_value value; +}; + +struct gguf_header { + uint32_t magic; + uint32_t version; + uint32_t n_tensors; + uint32_t n_kv; +}; + +struct gguf_tensor_info { + struct gguf_str name; + + uint32_t n_dims; + uint32_t ne[GGML_MAX_DIMS]; + + enum ggml_type type; + + uint64_t offset; // offset from start of `data`, must be a multiple of `ALIGNMENT` + + // for writing API + const void * data; + size_t size; +}; + +struct gguf_context { + struct gguf_header header; + + struct gguf_kv * kv; + struct gguf_tensor_info * infos; + + size_t alignment; + size_t offset; // offset of `data` from beginning of file + size_t size; // size of `data` in bytes + + //uint8_t * padding; + void * data; +}; + +static bool gguf_fread_el(FILE * file, void * dst, size_t size, size_t * offset) { + const size_t n = fread(dst, 1, size, file); + *offset += n; + return n == size; +} + +static bool gguf_fread_str(FILE * file, struct gguf_str * p, size_t * offset) { + p->n = 0; + p->data = NULL; + + bool ok = true; + + // TODO: how to avoid mallocs for strings? 
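+    // on disk a gguf string is a uint32 length followed by that many raw bytes
+    // (no terminating NUL); the calloc(n + 1, 1) below reserves one extra zeroed
+    // byte so that p->data is always usable as a C string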
+ ok = ok && gguf_fread_el(file, &p->n, sizeof(p->n), offset); p->data = calloc(p->n + 1, 1); + ok = ok && gguf_fread_el(file, p->data, p->n, offset); + + return ok; +} + +struct gguf_context * gguf_init_empty(void) { + struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context)); + + ctx->header.magic = GGUF_MAGIC; + ctx->header.version = GGUF_VERSION; + ctx->header.n_tensors = 0; + ctx->header.n_kv = 0; + + ctx->kv = NULL; + ctx->infos = NULL; + + ctx->alignment = GGUF_DEFAULT_ALIGNMENT; + ctx->offset = 0; + ctx->size = 0; + + ctx->data = NULL; + + return ctx; +} + +struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params) { + FILE * file = fopen(fname, "rb"); + if (!file) { + return NULL; + } + + // offset from start of file + size_t offset = 0; + + uint32_t magic = 0; + + // check the magic before making allocations + { + gguf_fread_el(file, &magic, sizeof(magic), &offset); + + if (magic != GGUF_MAGIC) { + fprintf(stderr, "%s: invalid magic number %08x\n", __func__, magic); + fclose(file); + return NULL; + } + } + + bool ok = true; + + struct gguf_context * ctx = GGML_ALIGNED_MALLOC(sizeof(struct gguf_context)); + + // read the header + { + ctx->header.magic = magic; + + ctx->kv = NULL; + ctx->infos = NULL; + ctx->data = NULL; + + ok = ok && gguf_fread_el(file, &ctx->header.version, sizeof(ctx->header.version), &offset); + ok = ok && gguf_fread_el(file, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors), &offset); + ok = ok && gguf_fread_el(file, &ctx->header.n_kv, sizeof(ctx->header.n_kv), &offset); + + if (!ok) { + fprintf(stderr, "%s: failed to read header\n", __func__); + fclose(file); + gguf_free(ctx); + return NULL; + } + } + + // read the kv pairs + { + ctx->kv = GGML_ALIGNED_MALLOC(ctx->header.n_kv * sizeof(struct gguf_kv)); + + for (uint32_t i = 0; i < ctx->header.n_kv; ++i) { + struct gguf_kv * kv = &ctx->kv[i]; + + //fprintf(stderr, "%s: reading kv %d\n", __func__, i); + + ok = ok && gguf_fread_str(file, &kv->key, &offset); + //ok = ok && gguf_fread_el (file, &kv->n_bytes, sizeof(kv->n_bytes), &offset); + ok = ok && gguf_fread_el (file, &kv->type, sizeof(kv->type), &offset); + + //fprintf(stderr, "%s: reading kv with key %s\n", __func__, kv->key.data); + + switch (kv->type) { + case GGUF_TYPE_UINT8: ok = ok && gguf_fread_el (file, &kv->value.uint8, sizeof(kv->value.uint8), &offset); break; + case GGUF_TYPE_INT8: ok = ok && gguf_fread_el (file, &kv->value.int8, sizeof(kv->value.int8), &offset); break; + case GGUF_TYPE_UINT16: ok = ok && gguf_fread_el (file, &kv->value.uint16, sizeof(kv->value.uint16), &offset); break; + case GGUF_TYPE_INT16: ok = ok && gguf_fread_el (file, &kv->value.int16, sizeof(kv->value.int16), &offset); break; + case GGUF_TYPE_UINT32: ok = ok && gguf_fread_el (file, &kv->value.uint32, sizeof(kv->value.uint32), &offset); break; + case GGUF_TYPE_INT32: ok = ok && gguf_fread_el (file, &kv->value.int32, sizeof(kv->value.int32), &offset); break; + case GGUF_TYPE_FLOAT32: ok = ok && gguf_fread_el (file, &kv->value.float32, sizeof(kv->value.float32), &offset); break; + case GGUF_TYPE_BOOL: ok = ok && gguf_fread_el (file, &kv->value.bool_, sizeof(kv->value.bool_), &offset); break; + case GGUF_TYPE_STRING: ok = ok && gguf_fread_str(file, &kv->value.str, &offset); break; + case GGUF_TYPE_ARRAY: + { + ok = ok && gguf_fread_el(file, &kv->value.arr.type, sizeof(kv->value.arr.type), &offset); + ok = ok && gguf_fread_el(file, &kv->value.arr.n, sizeof(kv->value.arr.n), &offset); + + switch (kv->value.arr.type) { + 
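+                        // scalar array elements are read as one contiguous blob of
+                        // n * GGUF_TYPE_SIZE[type] bytes; string arrays are read one
+                        // element at a time since each carries its own length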
case GGUF_TYPE_UINT8: + case GGUF_TYPE_INT8: + case GGUF_TYPE_UINT16: + case GGUF_TYPE_INT16: + case GGUF_TYPE_UINT32: + case GGUF_TYPE_INT32: + case GGUF_TYPE_FLOAT32: + case GGUF_TYPE_BOOL: + { + kv->value.arr.data = malloc(kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type]); + ok = ok && gguf_fread_el(file, kv->value.arr.data, kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type], &offset); + } break; + case GGUF_TYPE_STRING: + { + kv->value.arr.data = malloc(kv->value.arr.n * sizeof(struct gguf_str)); + for (uint32_t j = 0; j < kv->value.arr.n; ++j) { + ok = ok && gguf_fread_str(file, &((struct gguf_str *) kv->value.arr.data)[j], &offset); + } + } break; + case GGUF_TYPE_ARRAY: + case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break; + }; + } break; + case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); + }; + + if (!ok) { + break; + } + } + + if (!ok) { + fprintf(stderr, "%s: failed to read key-value pairs\n", __func__); + fclose(file); + gguf_free(ctx); + return NULL; + } + } + + // read the tensor infos + { + ctx->infos = GGML_ALIGNED_MALLOC(ctx->header.n_tensors * sizeof(struct gguf_tensor_info)); + + for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) { + struct gguf_tensor_info * info = &ctx->infos[i]; + + for (int j = 0; j < GGML_MAX_DIMS; ++j) { + info->ne[j] = 1; + } + + ok = ok && gguf_fread_str(file, &info->name, &offset); + ok = ok && gguf_fread_el (file, &info->n_dims, sizeof(info->n_dims), &offset); + for (uint32_t j = 0; j < info->n_dims; ++j) { + ok = ok && gguf_fread_el(file, &info->ne[j], sizeof(info->ne[j]), &offset); + } + ok = ok && gguf_fread_el (file, &info->type, sizeof(info->type), &offset); + ok = ok && gguf_fread_el (file, &info->offset, sizeof(info->offset), &offset); + + if (!ok) { + fprintf(stderr, "%s: failed to read tensor info\n", __func__); + fclose(file); + gguf_free(ctx); + return NULL; + } + } + } + + ctx->alignment = GGUF_DEFAULT_ALIGNMENT; + + int alignment_idx = gguf_find_key(ctx, "general.alignment"); + if (alignment_idx != -1) { + ctx->alignment = gguf_get_val_u32(ctx, alignment_idx); + } + + // we require the data section to be aligned, so take into account any padding + { + const size_t offset_pad = offset % ctx->alignment; + + if (offset_pad != 0) { + offset += ctx->alignment - offset_pad; + fseek(file, offset, SEEK_SET); + } + } + + // store the current file offset - this is where the data section starts + ctx->offset = offset; + + // compute the total size of the data section, taking into account the alignment + { + ctx->size = 0; + for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) { + struct gguf_tensor_info * info = &ctx->infos[i]; + + const int64_t ne = + (int64_t) info->ne[0] * + (int64_t) info->ne[1] * + (int64_t) info->ne[2] * + (int64_t) info->ne[3]; + + if (ne % ggml_blck_size(info->type) != 0) { + fprintf(stderr, "%s: tensor '%s' number of elements (%" PRId64 ") is not a multiple of block size (%d)\n", + __func__, info->name.data, ne, ggml_blck_size(info->type)); + fclose(file); + gguf_free(ctx); + return NULL; + } + + const size_t size_cur = (ne*ggml_type_size(info->type))/ggml_blck_size(info->type); + + ctx->size += GGML_PAD(size_cur, ctx->alignment); + } + } + + // load the tensor data only if requested + if (params.ctx != NULL) { + // if the provided gguf_context is no_alloc, then we create "empty" tensors and do not read the binary blob + // otherwise, we load the binary blob into the created ggml_context as well, and point the "data" members of + // the ggml_tensor structs to the appropriate locations 
in the binary blob + + // compute the exact size needed for the new ggml_context + const size_t mem_size = + params.no_alloc ? + (ctx->header.n_tensors )*ggml_tensor_overhead() : + (ctx->header.n_tensors + 1)*ggml_tensor_overhead() + ctx->size; + + struct ggml_init_params pdata = { + .mem_size = mem_size, + .mem_buffer = NULL, + .no_alloc = params.no_alloc, + }; + + *params.ctx = ggml_init(pdata); + + struct ggml_context * ctx_data = *params.ctx; + + struct ggml_tensor * data = NULL; + + if (params.no_alloc == false) { + data = ggml_new_tensor_1d(ctx_data, GGML_TYPE_I8, ctx->size); + + ok = ok && data != NULL; + + // read the binary blob with the tensor data + ok = ok && gguf_fread_el(file, data->data, ctx->size, &offset); + + if (!ok) { + fprintf(stderr, "%s: failed to read tensor data\n", __func__); + fclose(file); + ggml_free(ctx_data); + gguf_free(ctx); + return NULL; + } + + ctx->data = data->data; + } + + ggml_set_no_alloc(ctx_data, true); + + // create the tensors + for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) { + const int64_t ne[GGML_MAX_DIMS] = { + ctx->infos[i].ne[0], + ctx->infos[i].ne[1], + ctx->infos[i].ne[2], + ctx->infos[i].ne[3], + }; + + struct ggml_tensor * cur = ggml_new_tensor(ctx_data, ctx->infos[i].type, ctx->infos[i].n_dims, ne); + + ok = ok && cur != NULL; + + ggml_set_name(cur, ctx->infos[i].name.data); + + if (!ok) { + break; + } + + // point the data member to the appropriate location in the binary blob using the tensor infos + if (params.no_alloc == false) { + //cur->data = (char *) data->data + ctx->infos[i].offset - ctx->offset; // offset from start of file + cur->data = (char *) data->data + ctx->infos[i].offset; // offset from data + } + } + + if (!ok) { + fprintf(stderr, "%s: failed to read the tensor data\n", __func__); + fclose(file); + ggml_free(ctx_data); + gguf_free(ctx); + return NULL; + } + + ggml_set_no_alloc(ctx_data, params.no_alloc); + } + + fclose(file); + + return ctx; +} + +void gguf_free(struct gguf_context * ctx) { + if (ctx == NULL) { + return; + } + + if (ctx->kv) { + // free string memory - not great.. 
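+        // note: the gguf_get_* accessors return pointers into these buffers, so
+        // callers must copy any key or string they need beyond the lifetime of
+        // the context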
+ for (uint32_t i = 0; i < ctx->header.n_kv; ++i) { + struct gguf_kv * kv = &ctx->kv[i]; + + if (kv->key.data) { + free(kv->key.data); + } + + if (kv->type == GGUF_TYPE_STRING) { + if (kv->value.str.data) { + free(kv->value.str.data); + } + } + + if (kv->type == GGUF_TYPE_ARRAY) { + if (kv->value.arr.data) { + if (kv->value.arr.type == GGUF_TYPE_STRING) { + for (uint32_t j = 0; j < kv->value.arr.n; ++j) { + struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[j]; + if (str->data) { + free(str->data); + } + } + } + free(kv->value.arr.data); + } + } + } + + GGML_ALIGNED_FREE(ctx->kv); + } + + if (ctx->infos) { + for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) { + struct gguf_tensor_info * info = &ctx->infos[i]; + + if (info->name.data) { + free(info->name.data); + } + } + + GGML_ALIGNED_FREE(ctx->infos); + } + + GGML_ALIGNED_FREE(ctx); +} + +const char * gguf_type_name(enum gguf_type type) { + return GGUF_TYPE_NAME[type]; +} + +int gguf_get_version(struct gguf_context * ctx) { + return ctx->header.version; +} + +size_t gguf_get_alignment(struct gguf_context * ctx) { + return ctx->alignment; +} + +size_t gguf_get_data_offset(struct gguf_context * ctx) { + return ctx->offset; +} + +void * gguf_get_data(struct gguf_context * ctx) { + return ctx->data; +} + +int gguf_get_n_kv(struct gguf_context * ctx) { + return ctx->header.n_kv; +} + +int gguf_find_key(struct gguf_context * ctx, const char * key) { + // return -1 if key not found + int keyfound = -1; + + const int n_kv = gguf_get_n_kv(ctx); + + for (int i = 0; i < n_kv; ++i) { + if (strcmp(key, gguf_get_key(ctx, i)) == 0) { + keyfound = i; + break; + } + } + + return keyfound; +} + +const char * gguf_get_key(struct gguf_context * ctx, int i) { + return ctx->kv[i].key.data; +} + +enum gguf_type gguf_get_kv_type(struct gguf_context * ctx, int i) { + return ctx->kv[i].type; +} + +enum gguf_type gguf_get_arr_type(struct gguf_context * ctx, int i) { + return ctx->kv[i].value.arr.type; +} + +const void * gguf_get_arr_data(struct gguf_context * ctx, int i) { + return ctx->kv[i].value.arr.data; +} + +const char * gguf_get_arr_str(struct gguf_context * ctx, int key_id, int i) { + struct gguf_kv * kv = &ctx->kv[key_id]; + struct gguf_str * str = &((struct gguf_str *) kv->value.arr.data)[i]; + return str->data; +} + +int gguf_get_arr_n(struct gguf_context * ctx, int i) { + return ctx->kv[i].value.arr.n; +} + +uint8_t gguf_get_val_u8(struct gguf_context * ctx, int i) { + return ctx->kv[i].value.uint8; +} + +int8_t gguf_get_val_i8(struct gguf_context * ctx, int i) { + return ctx->kv[i].value.int8; +} + +uint16_t gguf_get_val_u16(struct gguf_context * ctx, int i) { + return ctx->kv[i].value.uint16; +} + +int16_t gguf_get_val_i16(struct gguf_context * ctx, int i) { + return ctx->kv[i].value.int16; +} + +uint32_t gguf_get_val_u32(struct gguf_context * ctx, int i) { + return ctx->kv[i].value.uint32; +} + +int32_t gguf_get_val_i32(struct gguf_context * ctx, int i) { + return ctx->kv[i].value.int32; +} + +float gguf_get_val_f32(struct gguf_context * ctx, int i) { + return ctx->kv[i].value.float32; +} + +bool gguf_get_val_bool(struct gguf_context * ctx, int i) { + return ctx->kv[i].value.bool_; +} + +const char * gguf_get_val_str (struct gguf_context * ctx, int i) { + return ctx->kv[i].value.str.data; +} + +int gguf_get_n_tensors(struct gguf_context * ctx) { + return ctx->header.n_tensors; +} + +int gguf_find_tensor(struct gguf_context * ctx, const char * name) { + // return -1 if tensor not found + int tensorfound = -1; + + const int n_tensors 
= gguf_get_n_tensors(ctx); + + for (int i = 0; i < n_tensors; ++i) { + if (strcmp(name, gguf_get_tensor_name(ctx, i)) == 0) { + tensorfound = i; + break; + } + } + + return tensorfound; +} + +size_t gguf_get_tensor_offset(struct gguf_context * ctx, int i) { + return ctx->infos[i].offset; +} + +char * gguf_get_tensor_name(struct gguf_context * ctx, int i) { + return ctx->infos[i].name.data; +} + +// returns the index +static int gguf_get_or_add_key(struct gguf_context * ctx, const char * key) { + const int idx = gguf_find_key(ctx, key); + if (idx >= 0) { + return idx; + } + + const int n_kv = gguf_get_n_kv(ctx); + + ctx->kv = realloc(ctx->kv, (n_kv + 1) * sizeof(struct gguf_kv)); + ctx->kv[n_kv].key.n = strlen(key) + 1; + ctx->kv[n_kv].key.data = strdup(key); + ctx->header.n_kv++; + + return n_kv; +} + +void gguf_set_val_u8(struct gguf_context * ctx, const char * key, uint8_t val) { + const int idx = gguf_get_or_add_key(ctx, key); + + ctx->kv[idx].type = GGUF_TYPE_UINT8; + ctx->kv[idx].value.uint8 = val; +} + +void gguf_set_val_i8(struct gguf_context * ctx, const char * key, int8_t val) { + const int idx = gguf_get_or_add_key(ctx, key); + + ctx->kv[idx].type = GGUF_TYPE_INT8; + ctx->kv[idx].value.int8 = val; +} + +void gguf_set_val_u16(struct gguf_context * ctx, const char * key, uint16_t val) { + const int idx = gguf_get_or_add_key(ctx, key); + + ctx->kv[idx].type = GGUF_TYPE_UINT16; + ctx->kv[idx].value.uint16 = val; +} + +void gguf_set_val_i16(struct gguf_context * ctx, const char * key, int16_t val) { + const int idx = gguf_get_or_add_key(ctx, key); + + ctx->kv[idx].type = GGUF_TYPE_INT16; + ctx->kv[idx].value.int16 = val; +} + +void gguf_set_val_u32(struct gguf_context * ctx, const char * key, uint32_t val) { + const int idx = gguf_get_or_add_key(ctx, key); + + ctx->kv[idx].type = GGUF_TYPE_UINT32; + ctx->kv[idx].value.uint32 = val; +} + +void gguf_set_val_i32(struct gguf_context * ctx, const char * key, int32_t val) { + const int idx = gguf_get_or_add_key(ctx, key); + + ctx->kv[idx].type = GGUF_TYPE_INT32; + ctx->kv[idx].value.int32 = val; +} + +void gguf_set_val_f32(struct gguf_context * ctx, const char * key, float val) { + const int idx = gguf_get_or_add_key(ctx, key); + + ctx->kv[idx].type = GGUF_TYPE_FLOAT32; + ctx->kv[idx].value.float32 = val; +} + +void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool val) { + const int idx = gguf_get_or_add_key(ctx, key); + + ctx->kv[idx].type = GGUF_TYPE_BOOL; + ctx->kv[idx].value.bool_ = val; +} + +void gguf_set_val_str(struct gguf_context * ctx, const char * key, const char * val) { + const int idx = gguf_get_or_add_key(ctx, key); + + ctx->kv[idx].type = GGUF_TYPE_STRING; + ctx->kv[idx].value.str.n = strlen(val) + 1; + ctx->kv[idx].value.str.data = strdup(val); +} + +void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n) { + const int idx = gguf_get_or_add_key(ctx, key); + + ctx->kv[idx].type = GGUF_TYPE_ARRAY; + ctx->kv[idx].value.arr.type = type; + ctx->kv[idx].value.arr.n = n; + ctx->kv[idx].value.arr.data = malloc(n*GGUF_TYPE_SIZE[type]); + memcpy(ctx->kv[idx].value.arr.data, data, n*GGUF_TYPE_SIZE[type]); +} + +void gguf_set_arr_str(struct gguf_context * ctx, const char * key, const char ** data, int n) { + const int idx = gguf_get_or_add_key(ctx, key); + + ctx->kv[idx].type = GGUF_TYPE_ARRAY; + ctx->kv[idx].value.arr.type = GGUF_TYPE_STRING; + ctx->kv[idx].value.arr.n = n; + ctx->kv[idx].value.arr.data = malloc(n*sizeof(struct gguf_str)); + for (int i = 0; 
i < n; i++) { + struct gguf_str * str = &((struct gguf_str *)ctx->kv[idx].value.arr.data)[i]; + str->n = strlen(data[i]) + 1; + str->data = strdup(data[i]); + } +} + +// set or add KV pairs from another context +void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src) { + for (uint32_t i = 0; i < src->header.n_kv; i++) { + switch (src->kv[i].type) { + case GGUF_TYPE_UINT8: gguf_set_val_u8 (ctx, src->kv[i].key.data, src->kv[i].value.uint8); break; + case GGUF_TYPE_INT8: gguf_set_val_i8 (ctx, src->kv[i].key.data, src->kv[i].value.int8); break; + case GGUF_TYPE_UINT16: gguf_set_val_u16 (ctx, src->kv[i].key.data, src->kv[i].value.uint16); break; + case GGUF_TYPE_INT16: gguf_set_val_i16 (ctx, src->kv[i].key.data, src->kv[i].value.int16); break; + case GGUF_TYPE_UINT32: gguf_set_val_u32 (ctx, src->kv[i].key.data, src->kv[i].value.uint32); break; + case GGUF_TYPE_INT32: gguf_set_val_i32 (ctx, src->kv[i].key.data, src->kv[i].value.int32); break; + case GGUF_TYPE_FLOAT32: gguf_set_val_f32 (ctx, src->kv[i].key.data, src->kv[i].value.float32); break; + case GGUF_TYPE_BOOL: gguf_set_val_bool(ctx, src->kv[i].key.data, src->kv[i].value.bool_); break; + case GGUF_TYPE_STRING: gguf_set_val_str (ctx, src->kv[i].key.data, src->kv[i].value.str.data); break; + case GGUF_TYPE_ARRAY: + { + if (src->kv[i].value.arr.type == GGUF_TYPE_STRING) { + const char ** data = malloc(src->kv[i].value.arr.n*sizeof(char *)); + for (uint32_t j = 0; j < src->kv[i].value.arr.n; j++) { + data[j] = ((struct gguf_str *)src->kv[i].value.arr.data)[j].data; + } + gguf_set_arr_str(ctx, src->kv[i].key.data, data, src->kv[i].value.arr.n); + free(data); + } else if (src->kv[i].value.arr.type == GGUF_TYPE_ARRAY) { + GGML_ASSERT(false && "nested arrays not supported"); + } else { + gguf_set_arr_data(ctx, src->kv[i].key.data, src->kv[i].value.arr.type, src->kv[i].value.arr.data, src->kv[i].value.arr.n); + } + } break; + case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break; + } + } +} + +void gguf_add_tensor( + struct gguf_context * ctx, + const struct ggml_tensor * tensor) { + const int idx = ctx->header.n_tensors; + ctx->infos = realloc(ctx->infos, (idx + 1)*sizeof(struct gguf_tensor_info)); + + ctx->infos[idx].name.n = strlen(tensor->name) + 1; + ctx->infos[idx].name.data = strdup(tensor->name); + + for (int i = 0; i < GGML_MAX_DIMS; ++i) { + ctx->infos[idx].ne[i] = 1; + } + + ctx->infos[idx].n_dims = tensor->n_dims; + for (int i = 0; i < tensor->n_dims; i++) { + ctx->infos[idx].ne[i] = tensor->ne[i]; + } + + ctx->infos[idx].type = tensor->type; + ctx->infos[idx].offset = 0; + ctx->infos[idx].data = tensor->data; + ctx->infos[idx].size = ggml_nbytes(tensor); + + if (ctx->header.n_tensors > 0) { + ctx->infos[idx].offset = ctx->infos[idx - 1].offset + GGML_PAD(ctx->infos[idx - 1].size, ctx->alignment); + } + + ctx->header.n_tensors++; +} + +void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type) { + const int idx = gguf_find_tensor(ctx, name); + if (idx < 0) { + GGML_ASSERT(false && "tensor not found"); + } + + ctx->infos[idx].type = type; +} + +void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size) { + const int idx = gguf_find_tensor(ctx, name); + if (idx < 0) { + GGML_ASSERT(false && "tensor not found"); + } + + ctx->infos[idx].data = data; + ctx->infos[idx].size = size; + + // update offsets + for (uint32_t i = idx + 1; i < ctx->header.n_tensors; ++i) { + ctx->infos[i].offset = ctx->infos[i - 1].offset + GGML_PAD(ctx->infos[i 
- 1].size, ctx->alignment); + } +} + +//static void gguf_fwrite_str(FILE * file, const struct gguf_str * val) { +// fwrite(&val->n, sizeof(val->n), 1, file); +// fwrite(val->data, sizeof(char), val->n, file); +//} +// +//static void gguf_fwrite_el(FILE * file, const void * val, size_t size) { +// fwrite(val, sizeof(char), size, file); +//} + +struct gguf_buf { + void * data; + size_t size; + size_t offset; +}; + +static struct gguf_buf gguf_buf_init(size_t size) { + struct gguf_buf buf = { + /*buf.data =*/ size == 0 ? NULL : malloc(size), + /*buf.size =*/ size, + /*buf.offset =*/ 0, + }; + + return buf; +} + +static void gguf_buf_free(struct gguf_buf buf) { + if (buf.data) { + free(buf.data); + } +} + +static void gguf_buf_grow(struct gguf_buf * buf, size_t size) { + if (buf->offset + size > buf->size) { + buf->size = 1.5*(buf->offset + size); + if (buf->data) { + buf->data = realloc(buf->data, buf->size); + } + } +} + +static void gguf_bwrite_str(struct gguf_buf * buf, const struct gguf_str * val) { + gguf_buf_grow(buf, sizeof(val->n) + val->n); + + if (buf->data) { + memcpy((char *) buf->data + buf->offset, &val->n, sizeof(val->n)); + } + buf->offset += sizeof(val->n); + + if (buf->data) { + memcpy((char *) buf->data + buf->offset, val->data, val->n); + } + buf->offset += val->n; +} + +static void gguf_bwrite_el(struct gguf_buf * buf, const void * val, size_t el_size) { + gguf_buf_grow(buf, el_size); + + if (buf->data) { + memcpy((char *) buf->data + buf->offset, val, el_size); + } + buf->offset += el_size; +} + +static void gguf_write_to_buf(struct gguf_context * ctx, struct gguf_buf * buf, bool only_meta) { + // write header + gguf_bwrite_el(buf, &ctx->header.magic, sizeof(ctx->header.magic)); + gguf_bwrite_el(buf, &ctx->header.version, sizeof(ctx->header.version)); + gguf_bwrite_el(buf, &ctx->header.n_tensors, sizeof(ctx->header.n_tensors)); + gguf_bwrite_el(buf, &ctx->header.n_kv, sizeof(ctx->header.n_kv)); + + // write key-value pairs + for (uint32_t i = 0; i < ctx->header.n_kv; ++i) { + struct gguf_kv * kv = &ctx->kv[i]; + + gguf_bwrite_str(buf, &kv->key); + gguf_bwrite_el (buf, &kv->type, sizeof(kv->type)); + + switch (kv->type) { + case GGUF_TYPE_UINT8: gguf_bwrite_el( buf, &kv->value.uint8, sizeof(kv->value.uint8) ); break; + case GGUF_TYPE_INT8: gguf_bwrite_el (buf, &kv->value.int8, sizeof(kv->value.int8) ); break; + case GGUF_TYPE_UINT16: gguf_bwrite_el (buf, &kv->value.uint16, sizeof(kv->value.uint16) ); break; + case GGUF_TYPE_INT16: gguf_bwrite_el (buf, &kv->value.int16, sizeof(kv->value.int16) ); break; + case GGUF_TYPE_UINT32: gguf_bwrite_el (buf, &kv->value.uint32, sizeof(kv->value.uint32) ); break; + case GGUF_TYPE_INT32: gguf_bwrite_el (buf, &kv->value.int32, sizeof(kv->value.int32) ); break; + case GGUF_TYPE_FLOAT32: gguf_bwrite_el (buf, &kv->value.float32, sizeof(kv->value.float32)); break; + case GGUF_TYPE_BOOL: gguf_bwrite_el (buf, &kv->value.bool_, sizeof(kv->value.bool_) ); break; + case GGUF_TYPE_STRING: gguf_bwrite_str(buf, &kv->value.str ); break; + case GGUF_TYPE_ARRAY: + { + gguf_bwrite_el(buf, &kv->value.arr.type, sizeof(kv->value.arr.type)); + gguf_bwrite_el(buf, &kv->value.arr.n, sizeof(kv->value.arr.n) ); + + switch (kv->value.arr.type) { + case GGUF_TYPE_UINT8: + case GGUF_TYPE_INT8: + case GGUF_TYPE_UINT16: + case GGUF_TYPE_INT16: + case GGUF_TYPE_UINT32: + case GGUF_TYPE_INT32: + case GGUF_TYPE_FLOAT32: + case GGUF_TYPE_BOOL: + { + gguf_bwrite_el(buf, kv->value.arr.data, kv->value.arr.n * GGUF_TYPE_SIZE[kv->value.arr.type]); + } break; + case 
GGUF_TYPE_STRING: + { + for (uint32_t j = 0; j < kv->value.arr.n; ++j) { + gguf_bwrite_str(buf, &((struct gguf_str *) kv->value.arr.data)[j]); + } + } break; + case GGUF_TYPE_ARRAY: + case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); break; + }; + } break; + case GGUF_TYPE_COUNT: GGML_ASSERT(false && "invalid type"); + }; + } + + // write tensor infos + for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) { + struct gguf_tensor_info * info = &ctx->infos[i]; + + gguf_bwrite_str(buf, &info->name); + gguf_bwrite_el (buf, &info->n_dims, sizeof(info->n_dims)); + for (uint32_t j = 0; j < info->n_dims; ++j) { + gguf_bwrite_el(buf, &info->ne[j], sizeof(info->ne[j])); + } + gguf_bwrite_el(buf, &info->type, sizeof(info->type)); + gguf_bwrite_el(buf, &info->offset, sizeof(info->offset)); + } + + // we require the data section to be aligned, so take into account any padding + { + const size_t offset = buf->offset; + const size_t offset_pad = GGML_PAD(offset, ctx->alignment); + + if (offset_pad != offset) { + uint8_t pad = 0; + for (size_t i = 0; i < offset_pad - offset; ++i) { + gguf_bwrite_el(buf, &pad, sizeof(pad)); + } + } + } + + if (only_meta) { + return; + } + + size_t offset = 0; + + // write tensor data + for (uint32_t i = 0; i < ctx->header.n_tensors; ++i) { + struct gguf_tensor_info * info = &ctx->infos[i]; + + const size_t size = info->size; + const size_t size_pad = GGML_PAD(size, ctx->alignment); + + gguf_bwrite_el(buf, info->data, size); + + if (size_pad != size) { + uint8_t pad = 0; + for (size_t j = 0; j < size_pad - size; ++j) { + gguf_bwrite_el(buf, &pad, sizeof(pad)); + } + } + + GGML_ASSERT(offset == info->offset); + + offset += size_pad; + } +} + +void gguf_write_to_file(struct gguf_context * ctx, const char * fname, bool only_meta) { + FILE * file = fopen(fname, "wb"); + if (!file) { + GGML_ASSERT(false && "failed to open file for writing"); + } + + struct gguf_buf buf = gguf_buf_init(16*1024); + + gguf_write_to_buf(ctx, &buf, only_meta); + + fwrite(buf.data, 1, buf.offset, file); + + gguf_buf_free(buf); + + fclose(file); +} + +size_t gguf_get_meta_size(struct gguf_context * ctx) { + // no allocs - only compute size + struct gguf_buf buf = gguf_buf_init(0); + + gguf_write_to_buf(ctx, &buf, true); + + return buf.offset; +} + +void gguf_get_meta_data(struct gguf_context * ctx, void * data) { + struct gguf_buf buf = gguf_buf_init(16*1024); + + gguf_write_to_buf(ctx, &buf, true); + + memcpy(data, buf.data, buf.offset); + + gguf_buf_free(buf); +} + +//////////////////////////////////////////////////////////////////////////////// + int ggml_cpu_has_avx(void) { #if defined(__AVX__) return 1; diff --git a/ggml.h b/ggml.h index 3a946dbdc..544ad2d11 100644 --- a/ggml.h +++ b/ggml.h @@ -207,14 +207,18 @@ #define GGML_MAX_PARAMS 256 #define GGML_MAX_CONTEXTS 64 #define GGML_MAX_SRC 6 -#define GGML_MAX_NAME 48 +#define GGML_MAX_NAME 64 #define GGML_MAX_OP_PARAMS 32 #define GGML_DEFAULT_N_THREADS 4 - #define GGML_EXIT_SUCCESS 0 #define GGML_EXIT_ABORTED 1 +#define GGUF_MAGIC 0x46554747 // "GGUF" +#define GGUF_VERSION 1 + +#define GGUF_DEFAULT_ALIGNMENT 32 + #define GGML_UNUSED(x) (void)(x) #define GGML_PAD(x, n) (((x) + (n) - 1) & ~((n) - 1)) @@ -562,6 +566,7 @@ extern "C" { GGML_API int64_t ggml_nelements (const struct ggml_tensor * tensor); GGML_API int64_t ggml_nrows (const struct ggml_tensor * tensor); GGML_API size_t ggml_nbytes (const struct ggml_tensor * tensor); + GGML_API size_t ggml_nbytes_pad (const struct ggml_tensor * tensor); // same as ggml_nbytes() but padded to 
GGML_MEM_ALIGN GGML_API size_t ggml_nbytes_split(const struct ggml_tensor * tensor, int nrows_split); GGML_API int ggml_blck_size (enum ggml_type type); @@ -1494,7 +1499,6 @@ extern "C" { struct ggml_context * ctx, struct ggml_tensor * tensor); - GGML_API void ggml_build_forward_expand(struct ggml_cgraph * cgraph, struct ggml_tensor * tensor); GGML_API struct ggml_cgraph ggml_build_forward (struct ggml_tensor * tensor); @@ -1703,6 +1707,118 @@ extern "C" { GGML_API size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist); + // + // gguf + // + + enum gguf_type { + GGUF_TYPE_UINT8 = 0, + GGUF_TYPE_INT8 = 1, + GGUF_TYPE_UINT16 = 2, + GGUF_TYPE_INT16 = 3, + GGUF_TYPE_UINT32 = 4, + GGUF_TYPE_INT32 = 5, + GGUF_TYPE_FLOAT32 = 6, + GGUF_TYPE_BOOL = 7, + GGUF_TYPE_STRING = 8, + GGUF_TYPE_ARRAY = 9, + GGUF_TYPE_COUNT, // marks the end of the enum + }; + + struct gguf_context; + + struct gguf_init_params { + bool no_alloc; + + // if not NULL, create a ggml_context and allocate the tensor data in it + struct ggml_context ** ctx; + }; + + GGML_API struct gguf_context * gguf_init_empty(void); + GGML_API struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params); + //GGML_API struct gguf_context * gguf_init_from_buffer(..); + + GGML_API void gguf_free(struct gguf_context * ctx); + + GGML_API const char * gguf_type_name(enum gguf_type type); + + GGML_API int gguf_get_version (struct gguf_context * ctx); + GGML_API size_t gguf_get_alignment (struct gguf_context * ctx); + GGML_API size_t gguf_get_data_offset(struct gguf_context * ctx); + GGML_API void * gguf_get_data (struct gguf_context * ctx); + + GGML_API int gguf_get_n_kv(struct gguf_context * ctx); + GGML_API int gguf_find_key(struct gguf_context * ctx, const char * key); + GGML_API const char * gguf_get_key (struct gguf_context * ctx, int i); + + GGML_API enum gguf_type gguf_get_kv_type (struct gguf_context * ctx, int i); + GGML_API enum gguf_type gguf_get_arr_type(struct gguf_context * ctx, int i); + + // results are undefined if the wrong type is used for the key + GGML_API uint8_t gguf_get_val_u8 (struct gguf_context * ctx, int i); + GGML_API int8_t gguf_get_val_i8 (struct gguf_context * ctx, int i); + GGML_API uint16_t gguf_get_val_u16 (struct gguf_context * ctx, int i); + GGML_API int16_t gguf_get_val_i16 (struct gguf_context * ctx, int i); + GGML_API uint32_t gguf_get_val_u32 (struct gguf_context * ctx, int i); + GGML_API int32_t gguf_get_val_i32 (struct gguf_context * ctx, int i); + GGML_API float gguf_get_val_f32 (struct gguf_context * ctx, int i); + GGML_API bool gguf_get_val_bool(struct gguf_context * ctx, int i); + GGML_API const char * gguf_get_val_str (struct gguf_context * ctx, int i); + GGML_API int gguf_get_arr_n (struct gguf_context * ctx, int i); + GGML_API const void * gguf_get_arr_data(struct gguf_context * ctx, int i); + GGML_API const char * gguf_get_arr_str (struct gguf_context * ctx, int key_id, int i); + + GGML_API int gguf_get_n_tensors (struct gguf_context * ctx); + GGML_API int gguf_find_tensor (struct gguf_context * ctx, const char * name); + GGML_API size_t gguf_get_tensor_offset(struct gguf_context * ctx, int i); + GGML_API char * gguf_get_tensor_name (struct gguf_context * ctx, int i); + + // overrides existing values or adds a new one + GGML_API void gguf_set_val_u8 (struct gguf_context * ctx, const char * key, uint8_t val); + GGML_API void gguf_set_val_i8 (struct gguf_context * ctx, const char * key, int8_t val); + GGML_API 
void gguf_set_val_u16 (struct gguf_context * ctx, const char * key, uint16_t val);
+    GGML_API void gguf_set_val_i16 (struct gguf_context * ctx, const char * key, int16_t val);
+    GGML_API void gguf_set_val_u32 (struct gguf_context * ctx, const char * key, uint32_t val);
+    GGML_API void gguf_set_val_i32 (struct gguf_context * ctx, const char * key, int32_t val);
+    GGML_API void gguf_set_val_f32 (struct gguf_context * ctx, const char * key, float val);
+    GGML_API void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool val);
+    GGML_API void gguf_set_val_str (struct gguf_context * ctx, const char * key, const char * val);
+    GGML_API void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n);
+    GGML_API void gguf_set_arr_str (struct gguf_context * ctx, const char * key, const char ** data, int n);
+
+    // set or add KV pairs from another context
+    GGML_API void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src);
+
+    // manage tensor info
+    GGML_API void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tensor);
+    GGML_API void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type);
+    GGML_API void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size);
+
+    // writing gguf files can be done in 2 ways:
+    //
+    // - write the entire gguf_context to a binary file in a single pass:
+    //
+    //   gguf_write_to_file(ctx, fname, /*only_meta =*/ false);
+    //
+    // - first prepare a file with a placeholder for the meta data, write the tensor data, then write the meta data:
+    //
+    //   FILE * f = fopen(fname, "wb");
+    //   fseek(f, gguf_get_meta_size(ctx), SEEK_SET);
+    //   fwrite(..., f); // write the tensor data
+    //   void * data = malloc(gguf_get_meta_size(ctx));
+    //   gguf_get_meta_data(ctx, data);
+    //   fseek(f, 0, SEEK_SET);
+    //   fwrite(data, 1, gguf_get_meta_size(ctx), f);
+    //   free(data);
+    //   fclose(f);
+    //
+
+    // write the entire context to a binary file
+    GGML_API void gguf_write_to_file(struct gguf_context * ctx, const char * fname, bool only_meta);
+
+    // get the size in bytes of the meta data (header, kv pairs, tensor info) including padding
+    GGML_API size_t gguf_get_meta_size(struct gguf_context * ctx);
+    GGML_API void gguf_get_meta_data(struct gguf_context * ctx, void * data);
+
     //
     // system info
     //
diff --git a/gguf.py b/gguf.py
new file mode 100644
index 000000000..9776649c7
--- /dev/null
+++ b/gguf.py
@@ -0,0 +1,718 @@
+import shutil
+import sys
+import struct
+import tempfile
+import numpy as np
+
+from enum import IntEnum, auto
+from typing import Any, IO, List, Optional
+
+#
+# constants
+#
+
+GGUF_MAGIC             = 0x46554747
+GGUF_VERSION           = 1
+GGUF_DEFAULT_ALIGNMENT = 32
+
+# general
+KEY_GENERAL_ARCHITECTURE         = "general.architecture"
+KEY_GENERAL_QUANTIZATION_VERSION = "general.quantization_version"
+KEY_GENERAL_ALIGNMENT            = "general.alignment"
+KEY_GENERAL_NAME                 = "general.name"
+KEY_GENERAL_AUTHOR               = "general.author"
+KEY_GENERAL_URL                  = "general.url"
+KEY_GENERAL_DESCRIPTION          = "general.description"
+KEY_GENERAL_LICENSE              = "general.license"
+KEY_GENERAL_SOURCE_URL           = "general.source.url"
+KEY_GENERAL_SOURCE_HF_REPO       = "general.source.huggingface.repository"
+
+# LLM
+KEY_LLM_CONTEXT_LENGTH        = "{arch}.context_length"
+KEY_LLM_EMBEDDING_LENGTH      = "{arch}.embedding_length"
+KEY_LLM_BLOCK_COUNT           = "{arch}.block_count"
+KEY_LLM_FEED_FORWARD_LENGTH   = "{arch}.feed_forward_length"
+KEY_LLM_USE_PARALLEL_RESIDUAL = "{arch}.use_parallel_residual"
+KEY_LLM_TENSOR_DATA_LAYOUT    = "{arch}.tensor_data_layout"
+
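+# note: the "{arch}" placeholder in the keys above is substituted with the
+# value written under general.architecture; a minimal sketch of the intended
+# usage (illustrative, not part of this file):
+#
+#   arch = "llama"
+#   key  = KEY_LLM_CONTEXT_LENGTH.format(arch=arch)  # -> "llama.context_length"
+
+# attention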
+KEY_ATTENTION_HEAD_COUNT = "{arch}.attention.head_count" +KEY_ATTENTION_HEAD_COUNT_KV = "{arch}.attention.head_count_kv" +KEY_ATTENTION_MAX_ALIBI_BIAS = "{arch}.attention.max_alibi_bias" +KEY_ATTENTION_CLAMP_KQV = "{arch}.attention.clamp_kqv" +KEY_ATTENTION_LAYERNORM_EPS = "{arch}.attention.layer_norm_epsilon" +KEY_ATTENTION_LAYERNORM_RMS_EPS = "{arch}.attention.layer_norm_rms_epsilon" + +# RoPE +KEY_ROPE_DIMENSION_COUNT = "{arch}.rope.dimension_count" +KEY_ROPE_SCALE_LINEAR = "{arch}.rope.scale_linear" + +# tokenization +KEY_TOKENIZER_MODEL = "tokenizer.ggml.model" +KEY_TOKENIZER_LIST = "tokenizer.ggml.tokens" +KEY_TOKENIZER_TOKEN_TYPE = "tokenizer.ggml.token_type" +KEY_TOKENIZER_SCORES = "tokenizer.ggml.scores" +KEY_TOKENIZER_MERGES = "tokenizer.ggml.merges" +KEY_TOKENIZER_BOS_ID = "tokenizer.ggml.bos_token_id" +KEY_TOKENIZER_EOS_ID = "tokenizer.ggml.eos_token_id" +KEY_TOKENIZER_UNK_ID = "tokenizer.ggml.unknown_token_id" +KEY_TOKENIZER_SEP_ID = "tokenizer.ggml.seperator_token_id" +KEY_TOKENIZER_PAD_ID = "tokenizer.ggml.padding_token_id" +KEY_TOKENIZER_HF_JSON = "tokenizer.huggingface.json" +KEY_TOKENIZER_RWKV = "tokenizer.rwkv.world" + + +# +# recommended mapping of model tensor names for storage in gguf +# + + +class MODEL_ARCH(IntEnum): + LLAMA = auto() + FALCON = auto() + GPT2 = auto() + GPTJ = auto() + GPTNEOX = auto() + MPT = auto() + + +class MODEL_TENSOR(IntEnum): + TOKEN_EMBD = auto() + POS_EMBD = auto() + OUTPUT = auto() + OUTPUT_NORM = auto() + ROPE_FREQS = auto() + ATTN_Q = auto() + ATTN_K = auto() + ATTN_V = auto() + ATTN_QKV = auto() + ATTN_OUT = auto() + ATTN_NORM = auto() + ATTN_NORM_2 = auto() + ATTN_ROT_EMBD = auto() + FFN_GATE = auto() + FFN_DOWN = auto() + FFN_UP = auto() + FFN_NORM = auto() + + +MODEL_ARCH_NAMES = { + MODEL_ARCH.LLAMA: "llama", + MODEL_ARCH.FALCON: "falcon", + MODEL_ARCH.GPT2: "gpt2", + MODEL_ARCH.GPTJ: "gptj", + MODEL_ARCH.GPTNEOX: "gptneox", + MODEL_ARCH.MPT: "mpt", +} + +MODEL_TENSOR_NAMES = { + MODEL_ARCH.LLAMA: { + MODEL_TENSOR.TOKEN_EMBD: "token_embd", + MODEL_TENSOR.OUTPUT_NORM: "output_norm", + MODEL_TENSOR.OUTPUT: "output", + MODEL_TENSOR.ROPE_FREQS: "rope_freqs", + MODEL_TENSOR.ATTN_NORM: "blk.{bid}.attn_norm", + MODEL_TENSOR.ATTN_Q: "blk.{bid}.attn_q", + MODEL_TENSOR.ATTN_K: "blk.{bid}.attn_k", + MODEL_TENSOR.ATTN_V: "blk.{bid}.attn_v", + MODEL_TENSOR.ATTN_OUT: "blk.{bid}.attn_output", + MODEL_TENSOR.ATTN_ROT_EMBD: "blk.{bid}.attn_rot_embd", + MODEL_TENSOR.FFN_NORM: "blk.{bid}.ffn_norm", + MODEL_TENSOR.FFN_GATE: "blk.{bid}.ffn_gate", + MODEL_TENSOR.FFN_DOWN: "blk.{bid}.ffn_down", + MODEL_TENSOR.FFN_UP: "blk.{bid}.ffn_up", + }, + MODEL_ARCH.GPTNEOX: { + MODEL_TENSOR.TOKEN_EMBD: "token_embd", + MODEL_TENSOR.OUTPUT_NORM: "output_norm", + MODEL_TENSOR.OUTPUT: "output", + MODEL_TENSOR.ATTN_NORM: "blk.{bid}.attn_norm", + MODEL_TENSOR.ATTN_QKV: "blk.{bid}.attn_qkv", + MODEL_TENSOR.ATTN_OUT: "blk.{bid}.attn_output", + MODEL_TENSOR.FFN_NORM: "blk.{bid}.ffn_norm", + MODEL_TENSOR.FFN_DOWN: "blk.{bid}.ffn_down", + MODEL_TENSOR.FFN_UP: "blk.{bid}.ffn_up", + }, + MODEL_ARCH.FALCON: { + MODEL_TENSOR.TOKEN_EMBD: "token_embd", + MODEL_TENSOR.OUTPUT_NORM: "output_norm", + MODEL_TENSOR.OUTPUT: "output", + MODEL_TENSOR.ATTN_NORM: "blk.{bid}.attn_norm", + MODEL_TENSOR.ATTN_NORM_2: "blk.{bid}.attn_norm_2", + MODEL_TENSOR.ATTN_QKV: "blk.{bid}.attn_qkv", + MODEL_TENSOR.ATTN_OUT: "blk.{bid}.attn_output", + MODEL_TENSOR.FFN_DOWN: "blk.{bid}.ffn_down", + MODEL_TENSOR.FFN_UP: "blk.{bid}.ffn_up", + }, + MODEL_ARCH.GPT2: { + # TODO + }, + # TODO +} + +# tensors that 
will not be serialized
+MODEL_TENSOR_SKIP = {
+    MODEL_ARCH.LLAMA: [
+        MODEL_TENSOR.ROPE_FREQS,
+        MODEL_TENSOR.ATTN_ROT_EMBD,
+    ],
+}
+
+
+# TODO: the following helper functions should be removed
+#       instead, get_tensor_name_map should return tuples of (name, MODEL_TENSOR)
+#       however, my Python is very bad, and I couldn't figure out how to do this, hence these functions
+# REMOVE
+def should_skip_tensor_TMP(arch: MODEL_ARCH, n_blocks: int, name: str) -> bool:
+    for skip in MODEL_TENSOR_SKIP.get(arch, []):
+        for i in range(n_blocks):
+            if name == MODEL_TENSOR_NAMES[arch][skip].format(bid=i):
+                return True
+
+    return False
+
+
+def get_tensor_name_map(arch: MODEL_ARCH, n_blocks: int) -> dict:
+    tensor_map = {}
+
+    # Token embeddings
+    mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.TOKEN_EMBD, None)
+
+    tensor_map["gpt_neox.embed_in"]           = mapped_to  # gptneox
+    tensor_map["transformer.wte"]             = mapped_to  # gpt2 mpt
+    tensor_map["transformer.word_embeddings"] = mapped_to  # falcon
+    tensor_map["model.embed_tokens"]          = mapped_to  # llama-hf
+    tensor_map["tok_embeddings"]              = mapped_to  # llama-pth
+
+    # Position embeddings
+    mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.POS_EMBD, None)
+
+    tensor_map["transformer.wpe"] = mapped_to  # gpt2
+
+    # Output
+    mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.OUTPUT, None)
+
+    tensor_map["embed_out"] = mapped_to  # gptneox
+    tensor_map["lm_head"]   = mapped_to  # gpt2 mpt falcon llama-hf
+    tensor_map["output"]    = mapped_to  # llama-pth
+
+    # Output norm
+    mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.OUTPUT_NORM, None)
+
+    tensor_map["gpt_neox.final_layer_norm"] = mapped_to  # gptneox
+    tensor_map["transformer.ln_f"]          = mapped_to  # gpt2 falcon
+    tensor_map["transformer.norm_f"]        = mapped_to  # mpt
+    tensor_map["model.norm"]                = mapped_to  # llama-hf
+    tensor_map["norm"]                      = mapped_to  # llama-pth
+
+    # Rope frequencies
+    mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ROPE_FREQS, None)
+
+    tensor_map["rope.freqs"] = mapped_to  # llama-pth
+
+    # Attention and feed-forward blocks
+    for i in range(0, n_blocks):
+        # Attention norm
+        # TODO: is there a simpler way to write these 2 lines in Python?
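+        # One possible simplification (untested sketch, not used below): a small
+        # local helper would collapse each lookup/format pair into a single call:
+        #
+        #   def fmt(key):
+        #       m = MODEL_TENSOR_NAMES[arch].get(key, None)
+        #       return m.format(bid=i) if m is not None else None
+        #
+        #   mapped_to = fmt(MODEL_TENSOR.ATTN_NORM)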
+ mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_NORM, None) + mapped_to = mapped_to.format(bid=i) if mapped_to else None + + tensor_map["gpt_neox.layers."+str(i)+".input_layernorm"] = mapped_to # gptneox + tensor_map["transformer.h."+str(i)+".ln_1"] = mapped_to # gpt2 + tensor_map["transformer.blocks."+str(i)+".norm_1"] = mapped_to # mpt + tensor_map["transformer.h."+str(i)+".input_layernorm"] = mapped_to # falcon7b + tensor_map["transformer.h."+str(i)+".ln_mlp"] = mapped_to # falcon40b + tensor_map["model.layers."+str(i)+".input_layernorm"] = mapped_to # llama-hf + tensor_map["layers."+str(i)+".attention_norm"] = mapped_to # llama-pth + + # Attention norm 2 + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_NORM_2, None) + mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None + + tensor_map["transformer.h."+str(i)+".ln_attn"] = mapped_to # falcon40b + + # Attention query-key-value + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_QKV, None) + mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None + + tensor_map["gpt_neox.layers."+str(i)+".attention.query_key_value"] = mapped_to # gptneox + tensor_map["transformer.h."+str(i)+".attn.c_attn"] = mapped_to # gpt2 + tensor_map["transformer.blocks."+str(i)+".attn.Wqkv"] = mapped_to # mpt + tensor_map["transformer.h."+str(i)+".self_attention.query_key_value"] = mapped_to # falcon + + # Attention query + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_Q, None) + mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None + + tensor_map["model.layers."+str(i)+".self_attn.q_proj"] = mapped_to # llama-hf + tensor_map["layers."+str(i)+".attention.wq"] = mapped_to # llama-pth + + # Attention key + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_K, None) + mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None + + tensor_map["model.layers."+str(i)+".self_attn.k_proj"] = mapped_to # llama-hf + tensor_map["layers."+str(i)+".attention.wk"] = mapped_to # llama-pth + + # Attention value + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_V, None) + mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None + + tensor_map["model.layers."+str(i)+".self_attn.v_proj"] = mapped_to # llama-hf + tensor_map["layers."+str(i)+".attention.wv"] = mapped_to # llama-pth + + # Attention output + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_OUT, None) + mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None + + tensor_map["gpt_neox.layers."+str(i)+".attention.dense"] = mapped_to # gptneox + tensor_map["transformer.h."+str(i)+".attn.c_proj"] = mapped_to # gpt2 + tensor_map["transformer.blocks."+str(i)+".attn.out_proj"] = mapped_to # mpt + tensor_map["transformer.h."+str(i)+".self_attention.dense"] = mapped_to # falcon + tensor_map["model.layers."+str(i)+".self_attn.o_proj"] = mapped_to # llama-hf + tensor_map["layers."+str(i)+".attention.wo"] = mapped_to # llama-pth + + # Rotary embeddings + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_ROT_EMBD, None) + mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None + + tensor_map["model.layers."+str(i)+".self_attn.rotary_emb.inv_freq"] = mapped_to # llama-hf + tensor_map["layers."+str(i)+".attention.inner_attention.rope.freqs"] = mapped_to # llama-pth + + # Feed-forward norm + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.FFN_NORM, None) + mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None + + 
tensor_map["gpt_neox.layers."+str(i)+".post_attention_layernorm"] = mapped_to # gptneox + tensor_map["transformer.h."+str(i)+".ln_2"] = mapped_to # gpt2 + tensor_map["transformer.blocks."+str(i)+".norm_2"] = mapped_to # mpt + tensor_map["model.layers."+str(i)+".post_attention_layernorm"] = mapped_to # llama-hf + tensor_map["layers."+str(i)+".ffn_norm"] = mapped_to # llama-pth + + # Feed-forward up + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.FFN_UP, None) + mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None + + tensor_map["gpt_neox.layers."+str(i)+".mlp.dense_h_to_4h"] = mapped_to # gptneox + tensor_map["transformer.h."+str(i)+".mlp.c_fc"] = mapped_to # gpt2 + tensor_map["transformer.blocks."+str(i)+".ffn.up_proj"] = mapped_to # mpt + tensor_map["transformer.h."+str(i)+".mlp.dense_h_to_4h"] = mapped_to # falcon + tensor_map["model.layers."+str(i)+".mlp.up_proj"] = mapped_to # llama-hf + tensor_map["layers."+str(i)+".feed_forward.w3"] = mapped_to # llama-pth + + # Feed-forward gate + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.FFN_GATE, None) + mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None + + tensor_map["model.layers."+str(i)+".mlp.gate_proj"] = mapped_to # llama-hf + tensor_map["layers."+str(i)+".feed_forward.w1"] = mapped_to # llama-pth + + # Feed-forward down + mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.FFN_DOWN, None) + mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None + + tensor_map["gpt_neox.layers."+str(i)+".mlp.dense_4h_to_h"] = mapped_to # gptneox + tensor_map["transformer.h."+str(i)+".mlp.c_proj"] = mapped_to # gpt2 + tensor_map["transformer.blocks."+str(i)+".ffn.down_proj"] = mapped_to # mpt + tensor_map["transformer.h."+str(i)+".mlp.dense_4h_to_h"] = mapped_to # falcon + tensor_map["model.layers."+str(i)+".mlp.down_proj"] = mapped_to # llama-hf + tensor_map["layers."+str(i)+".feed_forward.w2"] = mapped_to # llama-pth + + return tensor_map + + +class TokenType(IntEnum): + NORMAL = 1 + UNKNOWN = 2 + CONTROL = 3 + USER_DEFINED = 4 + UNUSED = 5 + BYTE = 6 + +# +# implementation +# + + +class GGMLQuantizationType(IntEnum): + F32 = 0 + F16 = 1 + Q4_0 = 2 + Q4_1 = 3 + Q5_0 = 6 + Q5_1 = 7 + Q8_0 = 8 + Q8_1 = 9 + Q2_K = 10 + Q3_K = 11 + Q4_K = 12 + Q5_K = 13 + Q6_K = 14 + Q8_K = 15 + + +class GGUFValueType(IntEnum): + UINT8 = 0 + INT8 = 1 + UINT16 = 2 + INT16 = 3 + UINT32 = 4 + INT32 = 5 + FLOAT32 = 6 + BOOL = 7 + STRING = 8 + ARRAY = 9 + + @staticmethod + def get_type(val): + if isinstance(val, str) or isinstance(val, bytes) or isinstance(val, bytearray): + return GGUFValueType.STRING + elif isinstance(val, list): + return GGUFValueType.ARRAY + elif isinstance(val, float): + return GGUFValueType.FLOAT32 + elif isinstance(val, bool): + return GGUFValueType.BOOL + elif isinstance(val, int): + return GGUFValueType.INT32 + else: + print("Unknown type: "+str(type(val))) + sys.exit() + + +class GGUFWriter: + def __init__(self, path: str, arch: str, use_temp_file = True): + self.fout = open(path, "wb") + self.arch = arch + self.offset_tensor = 0 + self.data_alignment = GGUF_DEFAULT_ALIGNMENT + self.kv_data = b"" + self.kv_data_count = 0 + self.ti_data = b"" + self.ti_data_count = 0 + self.add_architecture() + self.use_temp_file = use_temp_file + self.tensors = [] + + def write_header_to_file(self): + self.fout.write(struct.pack(" int: + return ((x + n - 1) // n) * n + + def add_tensor_info(self, name: str, tensor_shape: np.ndarray, tensor_dtype: np.dtype, tensor_nbytes: int, raw_dtype: 
Optional[GGMLQuantizationType] = None): + assert raw_dtype is not None or tensor_dtype in (np.float32, np.float16), "Only F32 and F16 tensors are supported for now" + + encoded_name = name.encode("utf8") + self.ti_data += struct.pack(" -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#ifdef __has_include - #if __has_include() - #include - #if defined(_POSIX_MAPPED_FILES) - #include - #endif - #if defined(_POSIX_MEMLOCK_RANGE) - #include - #endif - #endif -#endif - -#if defined(_WIN32) - #define WIN32_LEAN_AND_MEAN - #ifndef NOMINMAX - #define NOMINMAX - #endif - #include - #include - #include // for _fseeki64 -#endif - -#define LLAMA_ASSERT(x) \ - do { \ - if (!(x)) { \ - fprintf(stderr, "LLAMA_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \ - abort(); \ - } \ - } while (0) - -#ifdef __GNUC__ -#ifdef __MINGW32__ -__attribute__((format(gnu_printf, 1, 2))) -#else -__attribute__((format(printf, 1, 2))) -#endif -#endif -static std::string format(const char * fmt, ...) { - va_list ap, ap2; - va_start(ap, fmt); - va_copy(ap2, ap); - int size = vsnprintf(NULL, 0, fmt, ap); - LLAMA_ASSERT(size >= 0 && size < INT_MAX); - std::vector buf(size + 1); - int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2); - LLAMA_ASSERT(size2 == size); - va_end(ap2); - va_end(ap); - return std::string(buf.data(), size); -} - -struct llama_file { - // use FILE * so we don't have to re-open the file to mmap - FILE * fp; - size_t size; - - llama_file(const char * fname, const char * mode) { - fp = std::fopen(fname, mode); - if (fp == NULL) { - throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno))); - } - seek(0, SEEK_END); - size = tell(); - seek(0, SEEK_SET); - } - - size_t tell() const { -#ifdef _WIN32 - __int64 ret = _ftelli64(fp); -#else - long ret = std::ftell(fp); -#endif - LLAMA_ASSERT(ret != -1); // this really shouldn't fail - return (size_t) ret; - } - - void seek(size_t offset, int whence) { -#ifdef _WIN32 - int ret = _fseeki64(fp, (__int64) offset, whence); -#else - int ret = std::fseek(fp, (long) offset, whence); -#endif - LLAMA_ASSERT(ret == 0); // same - } - - void read_raw(void * ptr, size_t len) const { - if (len == 0) { - return; - } - errno = 0; - std::size_t ret = std::fread(ptr, len, 1, fp); - if (ferror(fp)) { - throw std::runtime_error(format("read error: %s", strerror(errno))); - } - if (ret != 1) { - throw std::runtime_error(std::string("unexpectedly reached end of file")); - } - } - - std::uint32_t read_u32() { - std::uint32_t ret; - read_raw(&ret, sizeof(ret)); - return ret; - } - - std::string read_string(std::uint32_t len) { - std::vector chars(len); - read_raw(chars.data(), len); - return std::string(chars.data(), len); - } - - void write_raw(const void * ptr, size_t len) const { - if (len == 0) { - return; - } - errno = 0; - size_t ret = std::fwrite(ptr, len, 1, fp); - if (ret != 1) { - throw std::runtime_error(format("write error: %s", strerror(errno))); - } - } - - void write_u32(std::uint32_t val) { - write_raw(&val, sizeof(val)); - } - - ~llama_file() { - if (fp) { - std::fclose(fp); - } - } -}; - -// llama_context_data -struct llama_data_context { - virtual void write(const void * src, size_t size) = 0; - virtual size_t get_size_written() = 0; - virtual ~llama_data_context() = default; -}; - -struct llama_data_buffer_context : llama_data_context { - uint8_t* ptr; - size_t size_written = 0; - - llama_data_buffer_context(uint8_t * p) : ptr(p) {} - - void write(const void * src, size_t size) override { - 
memcpy(ptr, src, size); - ptr += size; - size_written += size; - } - - size_t get_size_written() override { - return size_written; - } -}; - -struct llama_data_file_context : llama_data_context { - llama_file* file; - size_t size_written = 0; - - llama_data_file_context(llama_file * f) : file(f) {} - - void write(const void * src, size_t size) override { - file->write_raw(src, size); - size_written += size; - } - - size_t get_size_written() override { - return size_written; - } -}; - -#if defined(_WIN32) -static std::string llama_format_win_err(DWORD err) { - LPSTR buf; - size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, - NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL); - if (!size) { - return "FormatMessageA failed"; - } - std::string ret(buf, size); - LocalFree(buf); - return ret; -} -#endif - -struct llama_mmap { - void * addr; - size_t size; - - llama_mmap(const llama_mmap &) = delete; - -#ifdef _POSIX_MAPPED_FILES - static constexpr bool SUPPORTED = true; - - llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1 /* -1 = max value */, bool numa = false) { - size = file->size; - int fd = fileno(file->fp); - int flags = MAP_SHARED; - // prefetch/readahead impairs performance on NUMA systems - if (numa) { prefetch = 0; } -#ifdef __linux__ - if (prefetch >= file->size) { flags |= MAP_POPULATE; } -#endif - addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0); - if (addr == MAP_FAILED) { - throw std::runtime_error(format("mmap failed: %s", strerror(errno))); - } - - if (prefetch > 0) { - // Advise the kernel to preload the mapped memory - if (madvise(addr, std::min(file->size, prefetch), MADV_WILLNEED)) { - fprintf(stderr, "warning: madvise(.., MADV_WILLNEED) failed: %s\n", - strerror(errno)); - } - } - if (numa) { - // advise the kernel not to use readahead - // (because the next page might not belong on the same node) - if (madvise(addr, file->size, MADV_RANDOM)) { - fprintf(stderr, "warning: madvise(.., MADV_RANDOM) failed: %s\n", - strerror(errno)); - } - } - } - - ~llama_mmap() { - munmap(addr, size); - } -#elif defined(_WIN32) - static constexpr bool SUPPORTED = true; - - llama_mmap(struct llama_file * file, bool prefetch = true, bool numa = false) { - (void) numa; - - size = file->size; - - HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp)); - - HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL); - DWORD error = GetLastError(); - - if (hMapping == NULL) { - throw std::runtime_error(format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str())); - } - - addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0); - error = GetLastError(); - CloseHandle(hMapping); - - if (addr == NULL) { - throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str())); - } - - if (prefetch) { - // The PrefetchVirtualMemory API is only present on Windows 8 and above, so we - // will dynamically load it using GetProcAddress. - BOOL (WINAPI *pPrefetchVirtualMemory) (HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG); - HMODULE hKernel32; - - // This call is guaranteed to succeed. - hKernel32 = GetModuleHandleW(L"kernel32.dll"); - - // This call may fail if on a pre-Win8 system. - pPrefetchVirtualMemory = reinterpret_cast (GetProcAddress(hKernel32, "PrefetchVirtualMemory")); - - if (pPrefetchVirtualMemory) { - // Advise the kernel to preload the mapped memory. 
- WIN32_MEMORY_RANGE_ENTRY range; - range.VirtualAddress = addr; - range.NumberOfBytes = (SIZE_T)size; - if (!pPrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) { - fprintf(stderr, "warning: PrefetchVirtualMemory failed: %s\n", - llama_format_win_err(GetLastError()).c_str()); - } - } - } - } - - ~llama_mmap() { - if (!UnmapViewOfFile(addr)) { - fprintf(stderr, "warning: UnmapViewOfFile failed: %s\n", - llama_format_win_err(GetLastError()).c_str()); - } - } -#else - static constexpr bool SUPPORTED = false; - - llama_mmap(struct llama_file *, bool prefetch = true, bool numa = false) { - (void) prefetch; - (void) numa; - - throw std::runtime_error(std::string("mmap not supported")); - } -#endif -}; - -// Represents some region of memory being locked using mlock or VirtualLock; -// will automatically unlock on destruction. -struct llama_mlock { - void * addr = NULL; - size_t size = 0; - bool failed_already = false; - - llama_mlock() {} - llama_mlock(const llama_mlock &) = delete; - - ~llama_mlock() { - if (size) { - raw_unlock(addr, size); - } - } - - void init(void * ptr) { - LLAMA_ASSERT(addr == NULL && size == 0); - addr = ptr; - } - - void grow_to(size_t target_size) { - LLAMA_ASSERT(addr); - if (failed_already) { - return; - } - size_t granularity = lock_granularity(); - target_size = (target_size + granularity - 1) & ~(granularity - 1); - if (target_size > size) { - if (raw_lock((uint8_t *) addr + size, target_size - size)) { - size = target_size; - } else { - failed_already = true; - } - } - } - -#ifdef _POSIX_MEMLOCK_RANGE - static constexpr bool SUPPORTED = true; - - size_t lock_granularity() { - return (size_t) sysconf(_SC_PAGESIZE); - } - - #ifdef __APPLE__ - #define MLOCK_SUGGESTION \ - "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \ - "decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MLOCK (ulimit -l).\n" - #else - #define MLOCK_SUGGESTION \ - "Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n" - #endif - - bool raw_lock(const void * addr, size_t size) { - if (!mlock(addr, size)) { - return true; - } else { - char* errmsg = std::strerror(errno); - bool suggest = (errno == ENOMEM); - - // Check if the resource limit is fine after all - struct rlimit lock_limit; - if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) - suggest = false; - if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size)) - suggest = false; - - fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s", - size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : ""); - return false; - } - } - - #undef MLOCK_SUGGESTION - - void raw_unlock(void * addr, size_t size) { - if (munlock(addr, size)) { - fprintf(stderr, "warning: failed to munlock buffer: %s\n", std::strerror(errno)); - } - } -#elif defined(_WIN32) - static constexpr bool SUPPORTED = true; - - size_t lock_granularity() { - SYSTEM_INFO si; - GetSystemInfo(&si); - return (size_t) si.dwPageSize; - } - - bool raw_lock(void * ptr, size_t len) { - for (int tries = 1; ; tries++) { - if (VirtualLock(ptr, len)) { - return true; - } - if (tries == 2) { - fprintf(stderr, "warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n", - len, size, llama_format_win_err(GetLastError()).c_str()); - return false; - } - - // It failed but this was only the first try; increase the working - // set size and try again. 
- SIZE_T min_ws_size, max_ws_size; - if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) { - fprintf(stderr, "warning: GetProcessWorkingSetSize failed: %s\n", - llama_format_win_err(GetLastError()).c_str()); - return false; - } - // Per MSDN: "The maximum number of pages that a process can lock - // is equal to the number of pages in its minimum working set minus - // a small overhead." - // Hopefully a megabyte is enough overhead: - size_t increment = len + 1048576; - // The minimum must be <= the maximum, so we need to increase both: - min_ws_size += increment; - max_ws_size += increment; - if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) { - fprintf(stderr, "warning: SetProcessWorkingSetSize failed: %s\n", - llama_format_win_err(GetLastError()).c_str()); - return false; - } - } - } - - void raw_unlock(void * ptr, size_t len) { - if (!VirtualUnlock(ptr, len)) { - fprintf(stderr, "warning: failed to VirtualUnlock buffer: %s\n", - llama_format_win_err(GetLastError()).c_str()); - } - } -#else - static constexpr bool SUPPORTED = false; - - size_t lock_granularity() { - return (size_t) 65536; - } - - bool raw_lock(const void * addr, size_t len) { - fprintf(stderr, "warning: mlock not supported on this system\n"); - return false; - } - - void raw_unlock(const void * addr, size_t len) {} -#endif -}; - -// Replacement for std::vector that doesn't require zero-initialization. -struct llama_buffer { - uint8_t * addr = NULL; - size_t size = 0; - - llama_buffer() = default; - - void resize(size_t len) { -#ifdef GGML_USE_METAL - free(addr); - int result = posix_memalign((void **) &addr, getpagesize(), len); - if (result == 0) { - memset(addr, 0, len); - } - else { - addr = NULL; - } -#else - delete[] addr; - addr = new uint8_t[len]; -#endif - size = len; - } - - ~llama_buffer() { -#ifdef GGML_USE_METAL - free(addr); -#else - delete[] addr; -#endif - addr = NULL; - } - - // disable copy and move - llama_buffer(const llama_buffer&) = delete; - llama_buffer(llama_buffer&&) = delete; - llama_buffer& operator=(const llama_buffer&) = delete; - llama_buffer& operator=(llama_buffer&&) = delete; -}; - -#ifdef GGML_USE_CUBLAS -#include "ggml-cuda.h" -struct llama_ctx_buffer { - uint8_t * addr = NULL; - bool is_cuda; - size_t size = 0; - - llama_ctx_buffer() = default; - - void resize(size_t size) { - free(); - - addr = (uint8_t *) ggml_cuda_host_malloc(size); - if (addr) { - is_cuda = true; - } - else { - // fall back to pageable memory - addr = new uint8_t[size]; - is_cuda = false; - } - this->size = size; - } - - void free() { - if (addr) { - if (is_cuda) { - ggml_cuda_host_free(addr); - } - else { - delete[] addr; - } - } - addr = NULL; - } - - ~llama_ctx_buffer() { - free(); - } - - // disable copy and move - llama_ctx_buffer(const llama_ctx_buffer&) = delete; - llama_ctx_buffer(llama_ctx_buffer&&) = delete; - llama_ctx_buffer& operator=(const llama_ctx_buffer&) = delete; - llama_ctx_buffer& operator=(llama_ctx_buffer&&) = delete; -}; -#else -typedef llama_buffer llama_ctx_buffer; -#endif - -#endif diff --git a/llama.cpp b/llama.cpp index f2cbe7641..c97aaee69 100644 --- a/llama.cpp +++ b/llama.cpp @@ -6,94 +6,146 @@ #include #endif -#include "llama-util.h" #include "llama.h" #include "ggml.h" + +#if !defined(GGML_USE_CUBLAS) +# include "ggml-alloc.h" +# define LLAMA_USE_ALLOCATOR +#else +# define LLAMA_USE_SCRATCH +# define LLAMA_MAX_SCRATCH_BUFFERS 16 +#endif + #ifdef GGML_USE_CUBLAS -#include "ggml-cuda.h" +# include "ggml-cuda.h" #elif 
defined(GGML_USE_CLBLAST) -#include "ggml-opencl.h" +# include "ggml-opencl.h" #endif #ifdef GGML_USE_METAL -#include "ggml-metal.h" +# include "ggml-metal.h" #endif #ifdef GGML_USE_MPI -#include "ggml-mpi.h" +# include "ggml-mpi.h" #endif #ifdef GGML_USE_K_QUANTS -#ifndef QK_K -#ifdef GGML_QKK_64 -#define QK_K 64 -#else -#define QK_K 256 -#endif -#endif +# ifndef QK_K +# ifdef GGML_QKK_64 +# define QK_K 64 +# else +# define QK_K 256 +# endif +# endif +#endif + +#ifdef __has_include + #if __has_include() + #include + #if defined(_POSIX_MAPPED_FILES) + #include + #endif + #if defined(_POSIX_MEMLOCK_RANGE) + #include + #endif + #endif +#endif + +#if defined(_WIN32) + #define WIN32_LEAN_AND_MEAN + #ifndef NOMINMAX + #define NOMINMAX + #endif + #include + #include + #include // for _fseeki64 #endif -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include +#include +#include +#include +#include +#include +#include +#include +#include #include -#include -#include +#include +#include #include -#include #include +#include +#include +#include +#include +#include #if defined(_MSC_VER) #pragma warning(disable: 4244 4267) // possible loss of data #endif -static void llama_log_internal(llama_log_level level, const char* format, ...); +// tensor names +#define TN_TOKEN_EMBD "token_embd.weight" +#define TN_OUTPUT_NORM "output_norm.weight" +#define TN_OUTPUT "output.weight" +#define TN_ATTN_NORM "blk.%d.attn_norm.weight" +#define TN_ATTN_Q "blk.%d.attn_q.weight" +#define TN_ATTN_K "blk.%d.attn_k.weight" +#define TN_ATTN_V "blk.%d.attn_v.weight" +#define TN_ATTN_OUTPUT "blk.%d.attn_output.weight" +#define TN_FFN_NORM "blk.%d.ffn_norm.weight" +#define TN_FFN_GATE "blk.%d.ffn_gate.weight" +#define TN_FFN_DOWN "blk.%d.ffn_down.weight" +#define TN_FFN_UP "blk.%d.ffn_up.weight" + +#ifdef __GNUC__ +#ifdef __MINGW32__ +#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__))) +#else +#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__))) +#endif +#else +#define LLAMA_ATTRIBUTE_FORMAT(...) +#endif + +// +// logging +// +LLAMA_ATTRIBUTE_FORMAT(2, 3) +static void llama_log_internal (llama_log_level level, const char* format, ...); static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data); + #define LLAMA_LOG_INFO(...) llama_log_internal(LLAMA_LOG_LEVEL_INFO , __VA_ARGS__) #define LLAMA_LOG_WARN(...) llama_log_internal(LLAMA_LOG_LEVEL_WARN , __VA_ARGS__) #define LLAMA_LOG_ERROR(...) llama_log_internal(LLAMA_LOG_LEVEL_ERROR, __VA_ARGS__) +// +// helpers +// -#if !defined(GGML_USE_CUBLAS) -#include "ggml-alloc.h" -#define LLAMA_USE_ALLOCATOR -#else -#define LLAMA_USE_SCRATCH -#define LLAMA_MAX_SCRATCH_BUFFERS 16 -#endif +static void zeros(std::ofstream & file, size_t n) { + char zero = 0; + for (size_t i = 0; i < n; ++i) { + file.write(&zero, 1); + } +} - -// available llama models -enum e_model { - MODEL_UNKNOWN, - MODEL_3B, - MODEL_7B, - MODEL_13B, - MODEL_30B, - MODEL_65B, - MODEL_70B, -}; - -static const size_t kB = 1024; -static const size_t MB = 1024*1024; - -// computed for n_ctx == 2048 -// TODO: dynamically determine these sizes -// needs modifications in ggml - -typedef void (*offload_func_t)(struct ggml_tensor * tensor); - -void llama_nop(struct ggml_tensor * tensor) { // don't offload by default - (void) tensor; +LLAMA_ATTRIBUTE_FORMAT(1, 2) +static std::string format(const char * fmt, ...) 
{ + va_list ap; + va_list ap2; + va_start(ap, fmt); + va_copy(ap2, ap); + int size = vsnprintf(NULL, 0, fmt, ap); + GGML_ASSERT(size >= 0 && size < INT_MAX); // NOLINT + std::vector buf(size + 1); + int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2); + GGML_ASSERT(size2 == size); + va_end(ap2); + va_end(ap); + return std::string(buf.data(), size); } // @@ -111,10 +163,453 @@ static void ggml_graph_compute_helper(std::vector & buf, ggml_cgraph * ggml_graph_compute(graph, &plan); } +// +// llama helpers +// + +#ifdef GGML_USE_CUBLAS +# define llama_host_malloc(n) ggml_cuda_host_malloc(n) +# define llama_host_free(data) ggml_cuda_host_free(data) +#elif GGML_USE_METAL +# define llama_host_malloc(n) ggml_metal_host_malloc(n) +# define llama_host_free(data) ggml_metal_host_free(data) +#else +# define llama_host_malloc(n) malloc(n) +# define llama_host_free(data) free(data) +#endif + +#if defined(_WIN32) +static std::string llama_format_win_err(DWORD err) { + LPSTR buf; + size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL); + if (!size) { + return "FormatMessageA failed"; + } + std::string ret(buf, size); + LocalFree(buf); + return ret; +} +#endif + +struct llama_buffer { + void * data = NULL; + size_t size = 0; + + // fallback to malloc / free + // useful in cases where CUDA can try to allocate PINNED memory + bool fallback = false; + + void resize(size_t n) { + llama_host_free(data); + + data = llama_host_malloc(n); + if (!data) { + fallback = true; + data = malloc(n); + } else { + fallback = false; + } + + GGML_ASSERT(data); + size = n; + } + + ~llama_buffer() { + if (data) { + if (fallback) { // NOLINT + free(data); + } else { + llama_host_free(data); + } + } + + data = NULL; + } +}; + +struct llama_file { + // use FILE * so we don't have to re-open the file to mmap + FILE * fp; + size_t size; + + llama_file(const char * fname, const char * mode) { + fp = std::fopen(fname, mode); + if (fp == NULL) { + throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno))); + } + seek(0, SEEK_END); + size = tell(); + seek(0, SEEK_SET); + } + + size_t tell() const { +#ifdef _WIN32 + __int64 ret = _ftelli64(fp); +#else + long ret = std::ftell(fp); +#endif + GGML_ASSERT(ret != -1); // this really shouldn't fail + return (size_t) ret; + } + + void seek(size_t offset, int whence) const { +#ifdef _WIN32 + int ret = _fseeki64(fp, (__int64) offset, whence); +#else + int ret = std::fseek(fp, (long) offset, whence); +#endif + GGML_ASSERT(ret == 0); // same + } + + void read_raw(void * ptr, size_t len) const { + if (len == 0) { + return; + } + errno = 0; + std::size_t ret = std::fread(ptr, len, 1, fp); + if (ferror(fp)) { + throw std::runtime_error(format("read error: %s", strerror(errno))); + } + if (ret != 1) { + throw std::runtime_error(std::string("unexpectedly reached end of file")); + } + } + + uint32_t read_u32() const { + uint32_t ret; + read_raw(&ret, sizeof(ret)); + return ret; + } + + void write_raw(const void * ptr, size_t len) const { + if (len == 0) { + return; + } + errno = 0; + size_t ret = std::fwrite(ptr, len, 1, fp); + if (ret != 1) { + throw std::runtime_error(format("write error: %s", strerror(errno))); + } + } + + void write_u32(std::uint32_t val) const { + write_raw(&val, sizeof(val)); + } + + ~llama_file() { + if (fp) { + std::fclose(fp); + } + } +}; + +struct llama_mmap { + void * addr; + size_t size; + + llama_mmap(const 
llama_mmap &) = delete; + +#ifdef _POSIX_MAPPED_FILES + static constexpr bool SUPPORTED = true; + + llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1 /* -1 = max value */, bool numa = false) { + size = file->size; + int fd = fileno(file->fp); + int flags = MAP_SHARED; + // prefetch/readahead impairs performance on NUMA systems + if (numa) { prefetch = 0; } +#ifdef __linux__ + if (prefetch) { flags |= MAP_POPULATE; } +#endif + addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0); + if (addr == MAP_FAILED) { + throw std::runtime_error(format("mmap failed: %s", strerror(errno))); + } + + if (prefetch > 0) { + // Advise the kernel to preload the mapped memory + if (madvise(addr, std::min(file->size, prefetch), MADV_WILLNEED)) { + fprintf(stderr, "warning: madvise(.., MADV_WILLNEED) failed: %s\n", + strerror(errno)); + } + } + if (numa) { + // advise the kernel not to use readahead + // (because the next page might not belong on the same node) + if (madvise(addr, file->size, MADV_RANDOM)) { + fprintf(stderr, "warning: madvise(.., MADV_RANDOM) failed: %s\n", + strerror(errno)); + } + } + } + + ~llama_mmap() { + munmap(addr, size); + } +#elif defined(_WIN32) + static constexpr bool SUPPORTED = true; + + llama_mmap(struct llama_file * file, bool prefetch = true, bool numa = false) { + (void) numa; + + size = file->size; + + HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp)); + + HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL); + DWORD error = GetLastError(); + + if (hMapping == NULL) { + throw std::runtime_error(format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str())); + } + + addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0); + error = GetLastError(); + CloseHandle(hMapping); + + if (addr == NULL) { + throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str())); + } + + #if _WIN32_WINNT >= _WIN32_WINNT_WIN8 + if (prefetch) { + // Advise the kernel to preload the mapped memory + WIN32_MEMORY_RANGE_ENTRY range; + range.VirtualAddress = addr; + range.NumberOfBytes = (SIZE_T)size; + if (!PrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) { + fprintf(stderr, "warning: PrefetchVirtualMemory failed: %s\n", + llama_format_win_err(GetLastError()).c_str()); + } + } + #else + #pragma message("warning: You are building for pre-Windows 8; prefetch not supported") + #endif // _WIN32_WINNT >= _WIN32_WINNT_WIN8 + } + + ~llama_mmap() { + if (!UnmapViewOfFile(addr)) { + fprintf(stderr, "warning: UnmapViewOfFile failed: %s\n", + llama_format_win_err(GetLastError()).c_str()); + } + } +#else + static constexpr bool SUPPORTED = false; + + llama_mmap(struct llama_file * file, bool prefetch = true, bool numa = false) { + (void) file; + (void) prefetch; + (void) numa; + + throw std::runtime_error(std::string("mmap not supported")); + } +#endif +}; + +// Represents some region of memory being locked using mlock or VirtualLock; +// will automatically unlock on destruction. 
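+// minimal usage sketch, assuming `buf` is a llama_buffer holding the data to
+// be pinned (illustrative, not from this patch):
+//
+//   llama_mlock lock;
+//   lock.init(buf.data);     // remember the base address (must be called once)
+//   lock.grow_to(buf.size);  // lock [addr, addr + size), rounded up to the lock granularity
+//
+// grow_to() can be called again as the buffer grows; the destructor unlocks.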
+struct llama_mlock { + void * addr = NULL; + size_t size = 0; + + bool failed_already = false; + + llama_mlock() {} + llama_mlock(const llama_mlock &) = delete; + + ~llama_mlock() { + if (size) { + raw_unlock(addr, size); + } + } + + void init(void * ptr) { + GGML_ASSERT(addr == NULL && size == 0); // NOLINT + addr = ptr; + } + + void grow_to(size_t target_size) { + GGML_ASSERT(addr); + if (failed_already) { + return; + } + size_t granularity = lock_granularity(); + target_size = (target_size + granularity - 1) & ~(granularity - 1); + if (target_size > size) { + if (raw_lock((uint8_t *) addr + size, target_size - size)) { + size = target_size; + } else { + failed_already = true; + } + } + } + +#ifdef _POSIX_MEMLOCK_RANGE + static constexpr bool SUPPORTED = true; + + static size_t lock_granularity() { + return (size_t) sysconf(_SC_PAGESIZE); + } + + #ifdef __APPLE__ + #define MLOCK_SUGGESTION \ + "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \ + "decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MLOCK (ulimit -l).\n" + #else + #define MLOCK_SUGGESTION \ + "Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n" + #endif + + bool raw_lock(const void * addr, size_t size) const { + if (!mlock(addr, size)) { + return true; + } + + char* errmsg = std::strerror(errno); + bool suggest = (errno == ENOMEM); + + // Check if the resource limit is fine after all + struct rlimit lock_limit; + if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit)) { + suggest = false; + } + if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size)) { + suggest = false; + } + + fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s", + size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : ""); + return false; + } + + #undef MLOCK_SUGGESTION + + static void raw_unlock(void * addr, size_t size) { + if (munlock(addr, size)) { + fprintf(stderr, "warning: failed to munlock buffer: %s\n", std::strerror(errno)); + } + } +#elif defined(_WIN32) + static constexpr bool SUPPORTED = true; + + static size_t lock_granularity() { + SYSTEM_INFO si; + GetSystemInfo(&si); + return (size_t) si.dwPageSize; + } + + bool raw_lock(void * ptr, size_t len) const { + for (int tries = 1; ; tries++) { + if (VirtualLock(ptr, len)) { + return true; + } + if (tries == 2) { + fprintf(stderr, "warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n", + len, size, llama_format_win_err(GetLastError()).c_str()); + return false; + } + + // It failed but this was only the first try; increase the working + // set size and try again. + SIZE_T min_ws_size, max_ws_size; + if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) { + fprintf(stderr, "warning: GetProcessWorkingSetSize failed: %s\n", + llama_format_win_err(GetLastError()).c_str()); + return false; + } + // Per MSDN: "The maximum number of pages that a process can lock + // is equal to the number of pages in its minimum working set minus + // a small overhead." 
+ // Hopefully a megabyte is enough overhead: + size_t increment = len + 1048576; + // The minimum must be <= the maximum, so we need to increase both: + min_ws_size += increment; + max_ws_size += increment; + if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) { + fprintf(stderr, "warning: SetProcessWorkingSetSize failed: %s\n", + llama_format_win_err(GetLastError()).c_str()); + return false; + } + } + } + + static void raw_unlock(void * ptr, size_t len) { + if (!VirtualUnlock(ptr, len)) { + fprintf(stderr, "warning: failed to VirtualUnlock buffer: %s\n", + llama_format_win_err(GetLastError()).c_str()); + } + } +#else + static constexpr bool SUPPORTED = false; + + static size_t lock_granularity() { + return (size_t) 65536; + } + + bool raw_lock(const void * addr, size_t len) const { + fprintf(stderr, "warning: mlock not supported on this system\n"); + return false; + } + + static void raw_unlock(const void * addr, size_t len) {} +#endif +}; + +typedef void (*offload_func_t)(struct ggml_tensor * tensor); + +static void llama_nop(struct ggml_tensor * tensor) { // don't offload by default + (void) tensor; +} + +static std::string llama_token_to_text(const struct llama_context * ctx, llama_token token) { + std::vector<char> result(8, 0); + const int n_tokens = llama_token_to_str(ctx, token, result.data(), result.size()); + if (n_tokens < 0) { + result.resize(-n_tokens); + int check = llama_token_to_str(ctx, token, result.data(), result.size()); + GGML_ASSERT(check == -n_tokens); + } else { + result.resize(n_tokens); + } + + return std::string(result.data(), result.size()); +} + +// +// globals +// + +struct llama_state { + // We save the log callback globally + llama_log_callback log_callback = llama_log_callback_default; + void * log_callback_user_data = nullptr; +}; + +static llama_state g_state; + // // memory sizes (calculated for n_batch == 512) // +// computed for n_ctx == 2048 +// TODO: dynamically determine these sizes +// needs modifications in ggml + +// available llama models +enum e_model { + MODEL_UNKNOWN, + MODEL_3B, + MODEL_7B, + MODEL_13B, + MODEL_30B, + MODEL_65B, + MODEL_70B, +}; + +static const size_t kB = 1024; +static const size_t MB = 1024*1024; + static std::map<e_model, size_t> MEM_REQ_SCRATCH0(int n_ctx) { std::map<e_model, size_t> k_sizes = { @@ -187,25 +682,21 @@ static const std::map<e_model, size_t> & VRAM_REQ_SCRATCH_PER_CONTEXT() // default hparams (LLaMA 7B) struct llama_hparams { - uint32_t n_vocab = 32000; - uint32_t n_ctx = 512; // this is provided as user input?
- uint32_t n_embd = 4096; - uint32_t n_mult = 256; - uint32_t n_head = 32; - uint32_t n_head_kv = 32; - uint32_t n_layer = 32; - uint32_t n_rot = 64; + uint32_t n_vocab = 32000; + uint32_t n_ctx_train = 2048; // the context size used during training + uint32_t n_ctx = 512; // the context size used during inference + uint32_t n_embd = 4096; + uint32_t n_head = 32; + uint32_t n_head_kv = 32; + uint32_t n_layer = 32; + uint32_t n_rot = 64; + uint32_t n_ff = 11008; - // LLaMAv2 - // TODO: load from model data hparams - float f_ffn_mult = 1.0f; - float f_rms_norm_eps = LLAMA_DEFAULT_RMS_EPS; + float f_norm_rms_eps = 1e-5; float rope_freq_base = 10000.0f; float rope_freq_scale = 1.0f; - enum llama_ftype ftype = LLAMA_FTYPE_MOSTLY_F16; - bool operator!=(const llama_hparams & other) const { return static_cast<bool>(memcmp(this, &other, sizeof(llama_hparams))); // NOLINT } @@ -257,7 +748,7 @@ struct llama_kv_cache { struct ggml_context * ctx = NULL; - llama_ctx_buffer buf; + llama_buffer buf; int n; // number of tokens currently in the cache @@ -274,22 +765,41 @@ struct llama_kv_cache { }; struct llama_vocab { + // TODO: + // - add a vector of merges + // so that we can pass it to different types of tokenizers with a common interface + using id = int32_t; using token = std::string; + using ttype = llama_token_type; - struct token_score { - token tok; + struct token_data { + token text; float score; + ttype type; }; + llama_vocab_type type = LLAMA_VOCAB_TYPE_SPM; + std::unordered_map<token, id> token_to_id; - std::vector<token_score> id_to_token; + std::vector<token_data> id_to_token; + + // default LLaMA special tokens + id special_bos_id = 1; + id special_eos_id = 2; + id special_unk_id = -1; + id special_sep_id = -1; + id special_pad_id = -1; + + id linefeed_id = 13; }; struct llama_model { - e_model type = MODEL_UNKNOWN; + e_model type = MODEL_UNKNOWN; + llama_ftype ftype = LLAMA_FTYPE_ALL_F32; llama_hparams hparams; + llama_vocab vocab; struct ggml_tensor * tok_embeddings; @@ -303,7 +813,7 @@ struct llama_model { struct ggml_context * ctx = NULL; // the model memory buffer - llama_ctx_buffer buf; + llama_buffer buf; // model memory mapped file std::unique_ptr<llama_mmap> mapping; @@ -318,8 +828,6 @@ struct llama_model { int64_t t_load_us = 0; int64_t t_start_us = 0; - llama_vocab vocab; - ~llama_model() { if (ctx) { ggml_free(ctx); @@ -391,16 +899,16 @@ struct llama_context { std::vector<uint8_t> work_buffer; // memory buffers used to evaluate the model - // TODO: move in llama_state - llama_ctx_buffer buf_compute; + llama_buffer buf_compute; #ifdef LLAMA_USE_ALLOCATOR - llama_ctx_buffer buf_alloc; + llama_buffer buf_alloc; ggml_allocr * alloc = NULL; #endif #ifdef LLAMA_USE_SCRATCH - llama_ctx_buffer buf_scratch[LLAMA_MAX_SCRATCH_BUFFERS]; + llama_buffer buf_scratch[LLAMA_MAX_SCRATCH_BUFFERS]; + int buf_last = 0; size_t buf_max_size[LLAMA_MAX_SCRATCH_BUFFERS] = { 0 }; #endif @@ -413,7 +921,7 @@ struct llama_context { ggml_mpi_context * ctx_mpi = NULL; #endif - void use_buf(struct ggml_context * ctx, int i) { + void use_buf(struct ggml_context * ctx, int i) { // NOLINT #if defined(LLAMA_USE_SCRATCH) size_t last_size = 0; @@ -421,7 +929,7 @@ struct llama_context { last_size = ggml_set_scratch(ctx, { 0, 0, nullptr, }); } else { auto & buf = buf_scratch[i]; - last_size = ggml_set_scratch(ctx, { 0, buf.size, buf.addr, }); + last_size = ggml_set_scratch(ctx, { 0, buf.size, buf.data, }); } if (buf_last >= 0) { @@ -435,7 +943,7 @@ struct llama_context { #endif } - size_t get_buf_max_mem(int i) const { + size_t get_buf_max_mem(int i) { // NOLINT #if
defined(LLAMA_USE_SCRATCH) return buf_max_size[i]; #else @@ -445,418 +953,11 @@ struct llama_context { } }; -struct llama_state { - // We save the log callback globally - llama_log_callback log_callback = llama_log_callback_default; - void * log_callback_user_data = nullptr; -}; -// global state -static llama_state g_state; - -template <typename T> -static T checked_mul(T a, T b) { - T ret = a * b; - if (a != 0 && ret / a != b) { - throw std::runtime_error(format("overflow multiplying %llu * %llu", - (unsigned long long) a, (unsigned long long) b)); - } - return ret; -} - -static size_t checked_div(size_t a, size_t b) { - if (b == 0 || a % b != 0) { - throw std::runtime_error(format("error dividing %zu / %zu", a, b)); - } - return a / b; -} - -static std::string llama_format_tensor_shape(const std::vector<uint32_t> & ne) { - char buf[256]; - snprintf(buf, sizeof(buf), "%5u", ne.at(0)); - for (size_t i = 1; i < ne.size(); i++) { - snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " x %5u", ne.at(i)); - } - return buf; -} - -static size_t llama_calc_tensor_size(const std::vector<uint32_t> & ne, enum ggml_type type) { - size_t size = ggml_type_size(type); - for (uint32_t dim : ne) { - size = checked_mul<size_t>(size, dim); - } - return size / ggml_blck_size(type); -} - -struct llama_load_tensor { - std::string name; - enum ggml_type type = GGML_TYPE_F32; - std::vector<uint32_t> ne; - size_t file_off; - size_t size; - struct ggml_tensor * ggml_tensor = NULL; - uint8_t * data; -}; - -struct llama_load_tensors_map { - // tensors is kept in a separate vector to preserve file order - std::vector<llama_load_tensor> tensors; - std::unordered_map<std::string, size_t> name_to_idx; -}; - -enum llama_file_version { - LLAMA_FILE_VERSION_GGML, - LLAMA_FILE_VERSION_GGMF_V1, // added version field and scores in vocab - LLAMA_FILE_VERSION_GGJT_V1, // added padding - LLAMA_FILE_VERSION_GGJT_V2, // changed quantization format - LLAMA_FILE_VERSION_GGJT_V3, // changed Q4 and Q8 quantization format -}; - -struct llama_file_loader { - llama_file file; - llama_file_version file_version; - llama_hparams hparams; - llama_vocab vocab; - - llama_file_loader(const char * fname, llama_load_tensors_map & tensors_map) - : file(fname, "rb") { - LLAMA_LOG_INFO("llama.cpp: loading model from %s\n", fname); - read_magic(); - read_hparams(); - read_vocab(); - read_tensor_metadata(tensors_map); - } - void read_magic() { - uint32_t magic = file.read_u32(); - - if (magic == LLAMA_FILE_MAGIC_GGML) { - file_version = LLAMA_FILE_VERSION_GGML; - return; - } - - uint32_t version = file.read_u32(); - - switch (magic) { - case LLAMA_FILE_MAGIC_GGMF: - switch (version) { - case 1: file_version = LLAMA_FILE_VERSION_GGMF_V1; return; - } - break; - case LLAMA_FILE_MAGIC_GGJT: - switch (version) { - case 1: file_version = LLAMA_FILE_VERSION_GGJT_V1; return; - case 2: file_version = LLAMA_FILE_VERSION_GGJT_V2; return; - case 3: file_version = LLAMA_FILE_VERSION_GGJT_V3; return; - } - } - - throw std::runtime_error(format("unknown (magic, version) combination: %08x, %08x; is this really a GGML file?", - magic, version)); - } - void read_hparams() { - hparams.n_vocab = file.read_u32(); - hparams.n_embd = file.read_u32(); - hparams.n_mult = file.read_u32(); - hparams.n_head = file.read_u32(); - hparams.n_layer = file.read_u32(); - hparams.n_rot = file.read_u32(); - hparams.ftype = (enum llama_ftype) file.read_u32(); - - // LLaMAv2 - // TODO: read from header - hparams.n_head_kv = hparams.n_head; - } - void read_vocab() { - vocab.id_to_token.resize(hparams.n_vocab); - - for (uint32_t i = 0; i < hparams.n_vocab; i++) { - uint32_t
len = file.read_u32(); - std::string word = file.read_string(len); - - float score = 0.0f; - file.read_raw(&score, sizeof(score)); - - vocab.token_to_id[word] = i; - - auto & tok_score = vocab.id_to_token[i]; - tok_score.tok = std::move(word); - tok_score.score = score; - } - } - void read_tensor_metadata(llama_load_tensors_map & tensors_map) { - while (file.tell() < file.size) { - llama_load_tensor tensor; - uint32_t n_dims = file.read_u32(); - uint32_t name_len = file.read_u32(); - tensor.type = (enum ggml_type) file.read_u32(); - tensor.ne.resize(n_dims); - file.read_raw(tensor.ne.data(), sizeof(tensor.ne[0]) * n_dims); - std::string name = file.read_string(name_len); - if (n_dims < 1 || n_dims > 2) { - throw std::runtime_error(format("llama.cpp: tensor '%s' should not be %u-dimensional", name.c_str(), n_dims)); - } - switch (tensor.type) { - case GGML_TYPE_F32: - case GGML_TYPE_F16: - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - break; - default: { - throw std::runtime_error(format("unrecognized tensor type %u\n", tensor.type)); - } - } - - // skip to the next multiple of 32 bytes - if (file_version >= LLAMA_FILE_VERSION_GGJT_V1) { - file.seek(-static_cast<ptrdiff_t>(file.tell()) & 31, SEEK_CUR); - } - - tensor.file_off = file.tell(); - tensor.name = name; - tensor.size = llama_calc_tensor_size(tensor.ne, tensor.type); - file.seek(tensor.size, SEEK_CUR); - - tensors_map.tensors.push_back(tensor); - tensors_map.name_to_idx[name] = tensors_map.tensors.size() - 1; - } - } -}; - -struct llama_file_saver { - llama_file file; - llama_file_loader * any_file_loader; - llama_file_saver(const char * fname, llama_file_loader * any_file_loader, enum llama_ftype new_ftype) - : file(fname, "wb"), any_file_loader(any_file_loader) { - LLAMA_LOG_INFO("llama.cpp: saving model to %s\n", fname); - write_magic(); - write_hparams(new_ftype); - write_vocab(); - } - void write_magic() { - file.write_u32(LLAMA_FILE_MAGIC); // magic - file.write_u32(LLAMA_FILE_VERSION); // version - } - void write_hparams(enum llama_ftype new_ftype) { - const llama_hparams & hparams = any_file_loader->hparams; - file.write_u32(hparams.n_vocab); - file.write_u32(hparams.n_embd); - file.write_u32(hparams.n_mult); - file.write_u32(hparams.n_head); - file.write_u32(hparams.n_layer); - file.write_u32(hparams.n_rot); - file.write_u32(new_ftype); - } - void write_vocab() { - if (any_file_loader->file_version == LLAMA_FILE_VERSION_GGML) { - LLAMA_LOG_WARN("llama.cpp: WARNING: input is an old file that doesn't have scores; will add dummy scores\n"); - } - uint32_t n_vocab = any_file_loader->hparams.n_vocab; - for (uint32_t i = 0; i < n_vocab; i++) { - const auto & token_score = any_file_loader->vocab.id_to_token.at(i); - file.write_u32((uint32_t) token_score.tok.size()); - file.write_raw(token_score.tok.data(), token_score.tok.size()); - file.write_raw(&token_score.score, sizeof(token_score.score)); - } - } - void write_tensor(llama_load_tensor & tensor, enum ggml_type new_type, const void * new_data, size_t new_size) { - switch (new_type) { - case GGML_TYPE_F32: - case GGML_TYPE_F16: - case GGML_TYPE_Q4_0: - case GGML_TYPE_Q4_1: - case GGML_TYPE_Q5_0: - case GGML_TYPE_Q5_1: - case GGML_TYPE_Q8_0: - case GGML_TYPE_Q2_K: - case GGML_TYPE_Q3_K: - case GGML_TYPE_Q4_K: - case GGML_TYPE_Q5_K: - case GGML_TYPE_Q6_K: - break; - default: LLAMA_ASSERT(false); - }
file.write_u32((uint32_t) tensor.ne.size()); - file.write_u32((uint32_t) tensor.name.size()); - file.write_u32(new_type); - file.write_raw(tensor.ne.data(), sizeof(tensor.ne[0]) * tensor.ne.size()); - file.write_raw(tensor.name.data(), tensor.name.size()); - file.seek(-static_cast<ptrdiff_t>(file.tell()) & 31, SEEK_CUR); - LLAMA_ASSERT(new_size == llama_calc_tensor_size(tensor.ne, new_type)); - file.write_raw(new_data, new_size); - } -}; - -struct llama_model_loader { - std::unique_ptr<llama_file_loader> file_loader; - llama_load_tensors_map tensors_map; - bool use_mmap; - size_t num_ggml_tensors_created = 0; - struct ggml_context * ggml_ctx = NULL; - std::unique_ptr<llama_mmap> mapping; - - llama_model_loader(const std::string & fname_base, bool use_mmap) { - file_loader = std::unique_ptr<llama_file_loader>(new llama_file_loader(fname_base.c_str(), tensors_map)); - if (!llama_mmap::SUPPORTED) { - use_mmap = false; - } - this->use_mmap = use_mmap; - } - - void calc_sizes(size_t * ctx_size_p, size_t * mmapped_size_p) const { - *ctx_size_p = *mmapped_size_p = 0; - for (const llama_load_tensor & lt : tensors_map.tensors) { - *ctx_size_p += sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE; - *(use_mmap ? mmapped_size_p : ctx_size_p) += lt.size + 16; - } - } - - struct ggml_tensor * get_tensor(const std::string & name, const std::vector<uint32_t> & ne, ggml_backend backend) { - auto it = tensors_map.name_to_idx.find(name); - if (it == tensors_map.name_to_idx.end()) { - throw std::runtime_error(std::runtime_error(format("llama.cpp: tensor '%s' is missing from model", name.c_str()))); - } - llama_load_tensor & lt = tensors_map.tensors.at(it->second); - if (lt.ne != ne) { - throw std::runtime_error(format("llama.cpp: tensor '%s' has wrong shape; expected %s, got %s", - name.c_str(), llama_format_tensor_shape(ne).c_str(), llama_format_tensor_shape(lt.ne).c_str())); - } - - return get_tensor_for(lt, backend); - } - - struct ggml_tensor * get_tensor_for(llama_load_tensor & lt, ggml_backend backend) { - struct ggml_tensor * tensor; - if (backend != GGML_BACKEND_CPU) { - ggml_set_no_alloc(ggml_ctx, true); - } - if (lt.ne.size() == 2) { - tensor = ggml_new_tensor_2d(ggml_ctx, lt.type, lt.ne.at(0), lt.ne.at(1)); - } else { - LLAMA_ASSERT(lt.ne.size() == 1); - tensor = ggml_new_tensor_1d(ggml_ctx, lt.type, lt.ne.at(0)); - } - ggml_set_name(tensor, lt.name.c_str()); - LLAMA_ASSERT(lt.ggml_tensor == NULL); // if this fails, we called get_tensor twice on the same tensor - - if (backend != GGML_BACKEND_CPU) { - ggml_set_no_alloc(ggml_ctx, use_mmap); - } - tensor->backend = backend; - lt.ggml_tensor = tensor; - num_ggml_tensors_created++; - return tensor; - } - - void done_getting_tensors() const { - if (num_ggml_tensors_created != tensors_map.tensors.size()) { - throw std::runtime_error(std::string("llama.cpp: file contained more tensors than expected")); - } - } - - void load_all_data(llama_progress_callback progress_callback, void * progress_callback_user_data, llama_mlock * lmlock) { - size_t data_size = 0; - size_t prefetch_size = file_loader->file.size; - size_t lock_size = 0; - for (const llama_load_tensor & lt : tensors_map.tensors) { - data_size += lt.size; - if (lt.ggml_tensor->backend != GGML_BACKEND_CPU) { - prefetch_size -= lt.size; - } - } - - if (use_mmap) { - mapping.reset(new llama_mmap(&file_loader->file, prefetch_size, ggml_is_numa())); - if (lmlock) { - lmlock->init(mapping->addr); - } - } - - size_t done_size = 0; - for (llama_load_tensor & lt : tensors_map.tensors) { - if (progress_callback) { - progress_callback((float) done_size / data_size,
progress_callback_user_data); - } - LLAMA_ASSERT(lt.ggml_tensor); // unused tensors should have been caught by load_data already - lt.data = (uint8_t *) lt.ggml_tensor->data; - - // allocate temp buffer if not using mmap - if (!use_mmap && lt.data == NULL) { - GGML_ASSERT(lt.ggml_tensor->backend != GGML_BACKEND_CPU); - lt.data = (uint8_t*)malloc(ggml_nbytes(lt.ggml_tensor)); - } - - load_data_for(lt); - - switch(lt.ggml_tensor->backend) { - case GGML_BACKEND_CPU: - lt.ggml_tensor->data = lt.data; - if (use_mmap && lmlock) { - lock_size += lt.size; - lmlock->grow_to(lock_size); - } - break; -#if defined(GGML_USE_CUBLAS) - case GGML_BACKEND_GPU: - case GGML_BACKEND_GPU_SPLIT: - ggml_cuda_transform_tensor(lt.data, lt.ggml_tensor); - if (!use_mmap) { - free(lt.data); - } - break; -#elif defined(GGML_USE_CLBLAST) - case GGML_BACKEND_GPU: - ggml_cl_transform_tensor(lt.data, lt.ggml_tensor); - if (!use_mmap) { - free(lt.data); - } - break; -#endif - default: - continue; - } - - done_size += lt.size; - } - } - - void load_data_for(llama_load_tensor & lt) { - if (use_mmap) { - lt.data = (uint8_t *) mapping->addr + lt.file_off; - } else { - llama_file & file = file_loader->file; - file.seek(lt.file_off, SEEK_SET); - file.read_raw(lt.data, lt.size); - } - - if (0) { - print_checksum(lt); - } - } - - static void print_checksum(llama_load_tensor & lt) { - uint32_t sum = 0; - for (size_t i = 0; i < lt.size; i++) { - uint8_t byte = lt.data[i]; - sum = byte + (sum << 6) + (sum << 16) - sum; // sdbm hash - } - LLAMA_LOG_INFO("%s checksum: %#08x (%s, size %zu)\n", lt.name.c_str(), sum, - llama_format_tensor_shape(lt.ne).c_str(), lt.size); - } - -}; - // -// kv cache +// kv cache helpers // -static bool kv_cache_init( +static bool llama_kv_cache_init( const struct llama_hparams & hparams, struct llama_kv_cache & cache, ggml_type wtype, @@ -873,7 +974,7 @@ static bool kv_cache_init( struct ggml_init_params params; params.mem_size = cache.buf.size; - params.mem_buffer = cache.buf.addr; + params.mem_buffer = cache.buf.data; params.no_alloc = false; cache.ctx = ggml_init(params); @@ -901,102 +1002,328 @@ static bool kv_cache_init( return true; } -struct llama_context_params llama_context_default_params() { - struct llama_context_params result = { - /*.seed =*/ LLAMA_DEFAULT_SEED, - /*.n_ctx =*/ 512, - /*.n_batch =*/ 512, - /*.n_gqa =*/ 1, - /*.rms_norm_eps =*/ LLAMA_DEFAULT_RMS_EPS, - /*.gpu_layers =*/ 0, - /*.main_gpu =*/ 0, - /*.tensor_split =*/ nullptr, - /*.rope_freq_base =*/ 10000.0f, - /*.rope_freq_scale =*/ 1.0f, - /*.progress_callback =*/ nullptr, - /*.progress_callback_user_data =*/ nullptr, - /*.low_vram =*/ false, - /*.mul_mat_q =*/ false, - /*.f16_kv =*/ true, - /*.logits_all =*/ false, - /*.vocab_only =*/ false, - /*.use_mmap =*/ true, - /*.use_mlock =*/ false, - /*.embedding =*/ false, - }; - - return result; -} - -struct llama_model_quantize_params llama_model_quantize_default_params() { - struct llama_model_quantize_params result = { - /*.nthread =*/ 0, - /*.ftype =*/ LLAMA_FTYPE_MOSTLY_Q5_1, - /*.allow_requantize =*/ false, - /*.quantize_output_tensor =*/ true, - }; - - return result; -} - -int llama_max_devices() { - return LLAMA_MAX_DEVICES; -} - -bool llama_mmap_supported() { - return llama_mmap::SUPPORTED; -} - -bool llama_mlock_supported() { - return llama_mlock::SUPPORTED; -} - -void llama_backend_init(bool numa) { - ggml_time_init(); - - // needed to initialize f16 tables - { - struct ggml_init_params params = { 0, NULL, false }; - struct ggml_context * ctx = ggml_init(params); - 
ggml_free(ctx); - } - - if (numa) { - ggml_numa_init(); - } - -#ifdef GGML_USE_MPI - ggml_mpi_backend_init(); -#endif -} - -void llama_backend_free() { -#ifdef GGML_USE_MPI - ggml_mpi_backend_free(); -#endif -} - -int64_t llama_time_us() { - return ggml_time_us(); } - // -// model loading +// model loading and saving // +enum llama_file_version { + GGUF_FILE_VERSION_V1 = 1, +}; + static const char * llama_file_version_name(llama_file_version version) { switch (version) { - case LLAMA_FILE_VERSION_GGML: return "'ggml' (old version with low tokenizer quality and no mmap support)"; - case LLAMA_FILE_VERSION_GGMF_V1: return "ggmf v1 (old version with no mmap support)"; - case LLAMA_FILE_VERSION_GGJT_V1: return "ggjt v1 (pre #1405)"; - case LLAMA_FILE_VERSION_GGJT_V2: return "ggjt v2 (pre #1508)"; - case LLAMA_FILE_VERSION_GGJT_V3: return "ggjt v3 (latest)"; + case GGUF_FILE_VERSION_V1: return "GGUF V1 (latest)"; } return "unknown"; } -const char * llama_ftype_name(enum llama_ftype ftype) { +static std::string llama_format_tensor_shape(const std::vector<uint32_t> & ne) { + char buf[256]; + snprintf(buf, sizeof(buf), "%5u", ne.at(0)); + for (size_t i = 1; i < ne.size(); i++) { + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5u", ne.at(i)); + } + return buf; +} + +static std::string llama_format_tensor_shape(const struct ggml_tensor * t) { + char buf[256]; + snprintf(buf, sizeof(buf), "%5" PRId64, t->ne[0]); + for (int i = 1; i < GGML_MAX_DIMS; i++) { + snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ", %5" PRId64, t->ne[i]); + } + return buf; +} + +struct llama_model_loader { + int n_kv = 0; + int n_tensors = 0; + int n_created = 0; + + int64_t n_elements = 0; + + bool use_mmap = false; + + llama_file file; + llama_ftype ftype; + llama_file_version fver; + + std::unique_ptr<llama_mmap> mapping; + + struct gguf_context * ctx_gguf = NULL; + struct ggml_context * ctx_meta = NULL; + + llama_model_loader(const std::string & fname, bool use_mmap) : file(fname.c_str(), "rb") { + struct gguf_init_params params = { + /*.no_alloc = */ true, + /*.ctx = */ &ctx_meta, + }; + + ctx_gguf = gguf_init_from_file(fname.c_str(), params); + if (!ctx_gguf) { + throw std::runtime_error(format("%s: failed to load model from %s\n", __func__, fname.c_str())); + } + + n_kv = gguf_get_n_kv(ctx_gguf); + n_tensors = gguf_get_n_tensors(ctx_gguf); + + fver = (enum llama_file_version) gguf_get_version(ctx_gguf); + + for (int i = 0; i < n_tensors; i++) { + const char * name = gguf_get_tensor_name(ctx_gguf, i); + struct ggml_tensor * t = ggml_get_tensor(ctx_meta, name); + n_elements += ggml_nelements(t); + } + + LLAMA_LOG_INFO("%s: loaded meta data with %d key-value pairs and %d tensors from %s (version %s)\n", + __func__, n_kv, n_tensors, fname.c_str(), llama_file_version_name(fver)); + + // determine file type based on the number of tensors for each quantization and print meta data + // TODO: make optional + { + std::map<enum ggml_type, uint32_t> n_type; + + uint32_t n_type_max = 0; + enum ggml_type type_max = GGML_TYPE_F32; + + for (int i = 0; i < n_tensors; i++) { + const char * name = gguf_get_tensor_name(ctx_gguf, i); + struct ggml_tensor * meta = ggml_get_tensor(ctx_meta, name); + + n_type[meta->type]++; + + if (n_type_max < n_type[meta->type]) { + n_type_max = n_type[meta->type]; + type_max = meta->type; + } + + LLAMA_LOG_INFO("%s: - tensor %4d: %32s %-8s [ %s ]\n", __func__, i, name, ggml_type_name(meta->type), llama_format_tensor_shape(meta).c_str()); + } + + switch (type_max) { + case GGML_TYPE_F32: ftype = LLAMA_FTYPE_ALL_F32; break; + case
GGML_TYPE_F16: ftype = LLAMA_FTYPE_MOSTLY_F16; break; + case GGML_TYPE_Q4_0: ftype = LLAMA_FTYPE_MOSTLY_Q4_0; break; + case GGML_TYPE_Q4_1: ftype = LLAMA_FTYPE_MOSTLY_Q4_1; break; + case GGML_TYPE_Q5_0: ftype = LLAMA_FTYPE_MOSTLY_Q5_0; break; + case GGML_TYPE_Q5_1: ftype = LLAMA_FTYPE_MOSTLY_Q5_1; break; + case GGML_TYPE_Q8_0: ftype = LLAMA_FTYPE_MOSTLY_Q8_0; break; + case GGML_TYPE_Q2_K: ftype = LLAMA_FTYPE_MOSTLY_Q2_K; break; + case GGML_TYPE_Q3_K: ftype = LLAMA_FTYPE_MOSTLY_Q3_K_M; break; + case GGML_TYPE_Q4_K: ftype = LLAMA_FTYPE_MOSTLY_Q4_K_M; break; + case GGML_TYPE_Q5_K: ftype = LLAMA_FTYPE_MOSTLY_Q5_K_M; break; + case GGML_TYPE_Q6_K: ftype = LLAMA_FTYPE_MOSTLY_Q6_K; break; + default: + { + LLAMA_LOG_WARN("%s: unknown type %s\n", __func__, ggml_type_name(type_max)); + ftype = LLAMA_FTYPE_ALL_F32; + } break; + } + + for (int i = 0; i < n_kv; i++) { + const char * name = gguf_get_key(ctx_gguf, i); + const enum gguf_type type = gguf_get_kv_type(ctx_gguf, i); + + LLAMA_LOG_INFO("%s: - kv %3d: %42s %-8s\n", __func__, i, name, gguf_type_name(type)); + } + + // print type counts + for (auto & kv : n_type) { + if (kv.second == 0) { + continue; + } + + LLAMA_LOG_INFO("%s: - type %4s: %4d tensors\n", __func__, ggml_type_name(kv.first), kv.second); + } + } + + if (!llama_mmap::SUPPORTED) { + LLAMA_LOG_WARN("%s: mmap is not supported on this platform\n", __func__); + use_mmap = false; + } + + this->use_mmap = use_mmap; + } + + ~llama_model_loader() { + if (ctx_gguf) { + gguf_free(ctx_gguf); + } + if (ctx_meta) { + ggml_free(ctx_meta); + } + } + + const char * get_tensor_name(int i) const { + return gguf_get_tensor_name(ctx_gguf, i); + } + + struct ggml_tensor * get_tensor_meta(int i) const { + return ggml_get_tensor(ctx_meta, get_tensor_name(i)); + } + + void calc_sizes(size_t & ctx_size_p, size_t & mmapped_size_p) const { + ctx_size_p = 0; + mmapped_size_p = 0; + + for (int i = 0; i < n_tensors; i++) { + struct ggml_tensor * meta = get_tensor_meta(i); + ctx_size_p += sizeof(struct ggml_tensor) + GGML_OBJECT_SIZE; + (use_mmap ? 
mmapped_size_p : ctx_size_p) += ggml_nbytes_pad(meta); + } + } + + struct ggml_tensor * create_tensor_for(struct ggml_context * ctx, struct ggml_tensor * meta, ggml_backend backend) { + if (backend != GGML_BACKEND_CPU) { + ggml_set_no_alloc(ctx, true); + } + + struct ggml_tensor * tensor = ggml_dup_tensor(ctx, meta); + tensor->backend = backend; // TODO: ggml_set_backend + ggml_set_name(tensor, ggml_get_name(meta)); + + if (backend != GGML_BACKEND_CPU) { + ggml_set_no_alloc(ctx, use_mmap); + } + + n_created++; + + return tensor; + } + + struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend backend) { + struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, name.c_str()); + + if (cur == NULL) { + throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str())); + } + + { + bool is_ok = true; + for (size_t i = 0; i < ne.size(); ++i) { + if (ne[i] != cur->ne[i]) { + is_ok = false; + break; + } + } + if (!is_ok) { + throw std::runtime_error( + format("%s: tensor '%s' has wrong shape; expected %s, got %s", + __func__, name.c_str(), + llama_format_tensor_shape(ne).c_str(), + llama_format_tensor_shape(cur).c_str())); + } + } + + return create_tensor_for(ctx, cur, backend); + } + + void done_getting_tensors() const { + if (n_created != n_tensors) { + throw std::runtime_error(format("%s: wrong number of tensors; expected %d, got %d", __func__, n_tensors, n_created)); + } + } + + size_t file_offset(const char * name) const { + const int idx = gguf_find_tensor(ctx_gguf, name); + + if (idx < 0) { + throw std::runtime_error(format("%s: tensor '%s' not found in the file", __func__, name)); + } + + return gguf_get_data_offset(ctx_gguf) + gguf_get_tensor_offset(ctx_gguf, idx); + } + + void load_data_for(struct ggml_tensor * cur) const { + const size_t offs = file_offset(ggml_get_name(cur)); + + if (use_mmap) { + cur->data = (uint8_t *) mapping->addr + offs; + } else { + file.seek(offs, SEEK_SET); + file.read_raw(cur->data, ggml_nbytes(cur)); + } + } + + void load_all_data(struct ggml_context * ctx, llama_progress_callback progress_callback, void * progress_callback_user_data, llama_mlock * lmlock) { + size_t size_data = 0; + size_t size_lock = 0; + size_t size_pref = 0; // prefetch + + for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) { + struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i)); + size_data += ggml_nbytes(cur); + if (cur->backend == GGML_BACKEND_CPU) { + size_pref += ggml_nbytes(cur); + } + } + + if (use_mmap) { + mapping.reset(new llama_mmap(&file, size_pref, ggml_is_numa())); + if (lmlock) { + lmlock->init(mapping->addr); + } + } + + size_t done_size = 0; + for (int i = 0; i < gguf_get_n_tensors(ctx_gguf); i++) { + struct ggml_tensor * cur = ggml_get_tensor(ctx, gguf_get_tensor_name(ctx_gguf, i)); + GGML_ASSERT(cur); // unused tensors should have been caught by load_data already + + if (progress_callback) { + progress_callback((float) done_size / size_data, progress_callback_user_data); + } + + // allocate temp buffer if not using mmap + if (!use_mmap && cur->data == NULL) { + GGML_ASSERT(cur->backend != GGML_BACKEND_CPU); + cur->data = malloc(ggml_nbytes(cur)); + } + + load_data_for(cur); + + switch (cur->backend) { + case GGML_BACKEND_CPU: + if (use_mmap && lmlock) { + size_lock += ggml_nbytes(cur); + lmlock->grow_to(size_lock); + } + break; +#if defined(GGML_USE_CUBLAS) + case GGML_BACKEND_GPU: + case GGML_BACKEND_GPU_SPLIT: + // old code: + 
//ggml_cuda_transform_tensor(lt.data, lt.ggml_tensor); + + // TODO: test if this works !! + ggml_cuda_transform_tensor(cur->data, cur); + if (!use_mmap) { + free(cur->data); + } + break; +#elif defined(GGML_USE_CLBLAST) + case GGML_BACKEND_GPU: + ggml_cl_transform_tensor(cur->data, cur); + if (!use_mmap) { + free(cur->data); + } + break; +#endif + default: + continue; + } + + done_size += ggml_nbytes(cur); + } + } +}; + +// +// load LLaMA models +// + +const char * llama_model_ftype_name(enum llama_ftype ftype) { switch (ftype) { case LLAMA_FTYPE_ALL_F32: return "all F32"; case LLAMA_FTYPE_MOSTLY_F16: return "mostly F16"; @@ -1007,8 +1334,9 @@ const char * llama_ftype_name(enum llama_ftype ftype) { case LLAMA_FTYPE_MOSTLY_Q5_0: return "mostly Q5_0"; case LLAMA_FTYPE_MOSTLY_Q5_1: return "mostly Q5_1"; case LLAMA_FTYPE_MOSTLY_Q8_0: return "mostly Q8_0"; + // K-quants - case LLAMA_FTYPE_MOSTLY_Q2_K: return "mostly Q2_K"; + case LLAMA_FTYPE_MOSTLY_Q2_K: return "mostly Q2_K"; case LLAMA_FTYPE_MOSTLY_Q3_K_S: return "mostly Q3_K - Small"; case LLAMA_FTYPE_MOSTLY_Q3_K_M: return "mostly Q3_K - Medium"; case LLAMA_FTYPE_MOSTLY_Q3_K_L: return "mostly Q3_K - Large"; @@ -1016,20 +1344,21 @@ const char * llama_ftype_name(enum llama_ftype ftype) { case LLAMA_FTYPE_MOSTLY_Q4_K_M: return "mostly Q4_K - Medium"; case LLAMA_FTYPE_MOSTLY_Q5_K_S: return "mostly Q5_K - Small"; case LLAMA_FTYPE_MOSTLY_Q5_K_M: return "mostly Q5_K - Medium"; - case LLAMA_FTYPE_MOSTLY_Q6_K: return "mostly Q6_K"; - default: return "unknown, may not work"; + case LLAMA_FTYPE_MOSTLY_Q6_K: return "mostly Q6_K"; + + default: return "unknown, may not work"; } } static const char * llama_model_type_name(e_model type) { switch (type) { - case MODEL_3B: return "3B"; - case MODEL_7B: return "7B"; + case MODEL_3B: return "3B"; + case MODEL_7B: return "7B"; case MODEL_13B: return "13B"; case MODEL_30B: return "30B"; case MODEL_65B: return "65B"; case MODEL_70B: return "70B"; - default: LLAMA_ASSERT(false); + default: GGML_ASSERT(false); } } @@ -1039,8 +1368,6 @@ static void llama_model_load_internal( llama_vocab & vocab, int n_ctx, int n_batch, - int n_gqa, - float rms_norm_eps, int n_gpu_layers, int main_gpu, const float * tensor_split, @@ -1054,22 +1381,83 @@ static void llama_model_load_internal( bool vocab_only, llama_progress_callback progress_callback, void * progress_callback_user_data) { - model.t_start_us = ggml_time_us(); std::unique_ptr<llama_model_loader> ml(new llama_model_loader(fname, use_mmap)); - vocab = std::move(ml->file_loader->vocab); - model.hparams = ml->file_loader->hparams; model.n_gpu_layers = n_gpu_layers; - llama_file_version file_version = ml->file_loader->file_version; auto & hparams = model.hparams; - // TODO: read from file - hparams.f_rms_norm_eps = rms_norm_eps; + std::string general_name = "n/a"; + std::string general_arch = "n/a"; + // read hparams { + struct gguf_context * ctx = ml->ctx_gguf; + +#define GGUF_GET(dst, func, type, req, key) \ + { \ + const int kid = gguf_find_key(ctx, key); \ + if (kid >= 0) { \ + enum gguf_type ktype = gguf_get_kv_type(ctx, kid); \ + if (ktype != (type)) { \ + throw std::runtime_error(format("key %s has wrong type: %s", key, gguf_type_name(ktype))); \ + } \ + (dst) = func(ctx, kid); \ + } else if (req) { \ + throw std::runtime_error(format("key not found in model: %s", key)); \ + } \ + } + + std::string tokenizer_name; + GGUF_GET(tokenizer_name, gguf_get_val_str, GGUF_TYPE_STRING, true, "tokenizer.ggml.model"); + + if (tokenizer_name == "llama") { + vocab.type = LLAMA_VOCAB_TYPE_SPM; + } else
if (tokenizer_name == "gpt2") { + vocab.type = LLAMA_VOCAB_TYPE_BPE; + } else { + LLAMA_LOG_WARN("%s: unknown tokenizer: '%s'", __func__, tokenizer_name.c_str()); + LLAMA_LOG_WARN("%s: using default tokenizer: 'llama'", __func__); + vocab.type = LLAMA_VOCAB_TYPE_SPM; + } + + // get hparams kv + GGUF_GET(hparams.n_vocab, gguf_get_arr_n, GGUF_TYPE_ARRAY, true, "tokenizer.ggml.tokens"); + GGUF_GET(hparams.n_ctx_train, gguf_get_val_u32, GGUF_TYPE_UINT32, true, "llama.context_length"); + GGUF_GET(hparams.n_embd, gguf_get_val_u32, GGUF_TYPE_UINT32, true, "llama.embedding_length"); + GGUF_GET(hparams.n_ff, gguf_get_val_u32, GGUF_TYPE_UINT32, true, "llama.feed_forward_length"); + GGUF_GET(hparams.n_head, gguf_get_val_u32, GGUF_TYPE_UINT32, true, "llama.attention.head_count"); + GGUF_GET(hparams.n_layer, gguf_get_val_u32, GGUF_TYPE_UINT32, true, "llama.block_count"); + GGUF_GET(hparams.n_rot, gguf_get_val_u32, GGUF_TYPE_UINT32, true, "llama.rope.dimension_count"); + GGUF_GET(hparams.f_norm_rms_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, "llama.attention.layer_norm_rms_epsilon"); + + // n_head_kv is optional, default to n_head + hparams.n_head_kv = hparams.n_head; + GGUF_GET(hparams.n_head_kv, gguf_get_val_u32, GGUF_TYPE_UINT32, false, "llama.attention.head_count_kv"); + + // TODO: manually setting rope scale should override this + // rope_freq_scale (inverse of the kv) is optional + float ropescale = 1.0f; + GGUF_GET(ropescale, gguf_get_val_f32, GGUF_TYPE_FLOAT32, false, "llama.rope.scale_linear"); + if (ropescale != 1.0f) { + rope_freq_scale = 1.0f/ropescale; + } + + // get general kv + GGUF_GET(general_name, gguf_get_val_str, GGUF_TYPE_STRING, false, "general.name"); + GGUF_GET(general_arch, gguf_get_val_str, GGUF_TYPE_STRING, false, "general.architecture"); + + // special tokens + GGUF_GET(vocab.special_bos_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, "tokenizer.ggml.bos_token_id"); + GGUF_GET(vocab.special_eos_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, "tokenizer.ggml.eos_token_id"); + GGUF_GET(vocab.special_unk_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, "tokenizer.ggml.unknown_token_id"); + GGUF_GET(vocab.special_sep_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, "tokenizer.ggml.separator_token_id"); + GGUF_GET(vocab.special_pad_id, gguf_get_val_u32, GGUF_TYPE_UINT32, false, "tokenizer.ggml.padding_token_id"); + +#undef GGUF_GET + switch (hparams.n_layer) { case 26: model.type = e_model::MODEL_3B; break; case 32: model.type = e_model::MODEL_7B; break; @@ -1084,64 +1472,103 @@ static void llama_model_load_internal( } break; } + model.ftype = ml->ftype; + hparams.n_ctx = n_ctx; // LLaMAv2 - // TODO: temporary until GGUF - LLAMA_ASSERT(hparams.n_head % n_gqa == 0); - hparams.n_head_kv = hparams.n_head / n_gqa; - if (model.type == e_model::MODEL_65B && n_gqa == 8) { - LLAMA_LOG_WARN("%s: warning: assuming 70B model based on GQA == %d\n", __func__, n_gqa); - model.type = e_model::MODEL_70B; - hparams.f_ffn_mult = 1.3f; // from the params.json of the 70B model + // TODO: probably not needed + { + const auto n_gqa = hparams.n_gqa(); + + if (model.type == e_model::MODEL_65B && n_gqa == 8) { + LLAMA_LOG_WARN("%s: assuming 70B model based on GQA == %d\n", __func__, n_gqa); + model.type = e_model::MODEL_70B; + } } hparams.rope_freq_base = rope_freq_base; hparams.rope_freq_scale = rope_freq_scale; } - // ref: https://github.com/facebookresearch/llama/blob/6c7fe276574e78057f917549435a2554000a876d/llama/model.py#L194-L199 - const uint32_t n_ff_raw = 2*(4*hparams.n_embd)/3; - const uint32_t 
n_ff_mult = hparams.f_ffn_mult*n_ff_raw; - const uint32_t n_ff = ((n_ff_mult + hparams.n_mult - 1)/hparams.n_mult)*hparams.n_mult; - //const uint32_t n_ff = 28672; + // read vocab + { + struct gguf_context * ctx = ml->ctx_gguf; + + vocab.id_to_token.resize(hparams.n_vocab); + + const int token_idx = gguf_find_key(ctx, "tokenizer.ggml.tokens"); + if (token_idx == -1) { + throw std::runtime_error("cannot find tokenizer vocab in model file\n"); + } + + const int score_idx = gguf_find_key(ctx, "tokenizer.ggml.scores"); + if (score_idx == -1) { + throw std::runtime_error("cannot find tokenizer scores in model file\n"); + } + + const float * scores = (const float * ) gguf_get_arr_data(ctx, score_idx); + + const int toktype_idx = gguf_find_key(ctx, "tokenizer.ggml.token_type"); + if (toktype_idx == -1) { + throw std::runtime_error("cannot find token type list in GGUF file\n"); + } + + const int * toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx); + + for (uint32_t i = 0; i < hparams.n_vocab; i++) { + std::string word = gguf_get_arr_str(ctx, token_idx, i); + + vocab.token_to_id[word] = i; + + auto & token_data = vocab.id_to_token[i]; + token_data.text = std::move(word); + token_data.score = scores[i]; + token_data.type = (llama_token_type) toktypes[i]; + + // determine the newline token: 0x0A == 10 == '\n' + if (token_data.text == "<0x0A>") { + vocab.linefeed_id = i; + } + } + } { - LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(file_version)); - LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab); - LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, hparams.n_ctx); - LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd); - LLAMA_LOG_INFO("%s: n_mult = %u\n", __func__, hparams.n_mult); - LLAMA_LOG_INFO("%s: n_head = %u\n", __func__, hparams.n_head); - LLAMA_LOG_INFO("%s: n_head_kv = %u\n", __func__, hparams.n_head_kv); - LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer); - LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot); // a.k.a. n_embd_head, n_head_dim - LLAMA_LOG_INFO("%s: n_gqa = %u\n", __func__, hparams.n_gqa()); - LLAMA_LOG_INFO("%s: rnorm_eps = %.1e\n", __func__, hparams.f_rms_norm_eps); - LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, n_ff); - LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, hparams.rope_freq_base); - LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, hparams.rope_freq_scale); - LLAMA_LOG_INFO("%s: ftype = %u (%s)\n", __func__, hparams.ftype, llama_ftype_name(hparams.ftype)); - LLAMA_LOG_INFO("%s: model size = %s\n", __func__, llama_model_type_name(model.type)); - } + // hparams + LLAMA_LOG_INFO("%s: format = %s\n", __func__, llama_file_version_name(ml->fver)); + LLAMA_LOG_INFO("%s: arch = %s\n", __func__, general_arch.c_str()); + LLAMA_LOG_INFO("%s: vocab type = %s\n", __func__, vocab.type == LLAMA_VOCAB_TYPE_SPM ? "SPM" : "BPE"); // TODO: fix + LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab); + LLAMA_LOG_INFO("%s: n_ctx_train = %u\n", __func__, hparams.n_ctx_train); + LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, hparams.n_ctx); + LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd); + LLAMA_LOG_INFO("%s: n_head = %u\n", __func__, hparams.n_head); + LLAMA_LOG_INFO("%s: n_head_kv = %u\n", __func__, hparams.n_head_kv); + LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer); + LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot); // a.k.a. 
n_embd_head, n_head_dim + LLAMA_LOG_INFO("%s: n_gqa = %u\n", __func__, hparams.n_gqa()); + LLAMA_LOG_INFO("%s: f_norm_eps = %.1e\n", __func__, hparams.f_norm_rms_eps); + LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, hparams.n_ff); + LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, hparams.rope_freq_base); + LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, hparams.rope_freq_scale); + LLAMA_LOG_INFO("%s: model type = %s\n", __func__, llama_model_type_name(model.type)); + LLAMA_LOG_INFO("%s: model ftype = %s\n", __func__, llama_model_ftype_name(model.ftype)); + LLAMA_LOG_INFO("%s: model size = %.2f B\n", __func__, ml->n_elements*1e-9); - if (file_version < LLAMA_FILE_VERSION_GGJT_V2) { - if (hparams.ftype != LLAMA_FTYPE_ALL_F32 && - hparams.ftype != LLAMA_FTYPE_MOSTLY_F16 && - hparams.ftype != LLAMA_FTYPE_MOSTLY_Q8_0) { - throw std::runtime_error(format("this format is no longer supported (see https://github.com/ggerganov/llama.cpp/pull/1405)")); - } - } + // general kv + LLAMA_LOG_INFO("%s: general.name = %s\n", __func__, general_name.c_str()); - if (file_version < LLAMA_FILE_VERSION_GGJT_V3) { - if (hparams.ftype == LLAMA_FTYPE_MOSTLY_Q4_0 || - hparams.ftype == LLAMA_FTYPE_MOSTLY_Q4_1 || - hparams.ftype == LLAMA_FTYPE_MOSTLY_Q8_0) { - throw std::runtime_error(format("this format is no longer supported (see https://github.com/ggerganov/llama.cpp/pull/1508)")); - } + // special tokens + if (vocab.special_bos_id != -1) { LLAMA_LOG_INFO( "%s: BOS token = %d '%s'\n", __func__, vocab.special_bos_id, vocab.id_to_token[vocab.special_bos_id].text.c_str() ); } + if (vocab.special_eos_id != -1) { LLAMA_LOG_INFO( "%s: EOS token = %d '%s'\n", __func__, vocab.special_eos_id, vocab.id_to_token[vocab.special_eos_id].text.c_str() ); } + if (vocab.special_unk_id != -1) { LLAMA_LOG_INFO( "%s: UNK token = %d '%s'\n", __func__, vocab.special_unk_id, vocab.id_to_token[vocab.special_unk_id].text.c_str() ); } + if (vocab.special_sep_id != -1) { LLAMA_LOG_INFO( "%s: SEP token = %d '%s'\n", __func__, vocab.special_sep_id, vocab.id_to_token[vocab.special_sep_id].text.c_str() ); } + if (vocab.special_pad_id != -1) { LLAMA_LOG_INFO( "%s: PAD token = %d '%s'\n", __func__, vocab.special_pad_id, vocab.id_to_token[vocab.special_pad_id].text.c_str() ); } + if (vocab.linefeed_id != -1) { LLAMA_LOG_INFO( "%s: LF token = %d '%s'\n", __func__, vocab.linefeed_id, vocab.id_to_token[vocab.linefeed_id].text.c_str() ); } } if (vocab_only) { + LLAMA_LOG_INFO("%s: vocab only - skipping tensors\n", __func__); return; } @@ -1149,20 +1576,22 @@ static void llama_model_load_internal( size_t ctx_size; size_t mmapped_size; - ml->calc_sizes(&ctx_size, &mmapped_size); + + ml->calc_sizes(ctx_size, mmapped_size); + LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MB\n", __func__, ctx_size/1024.0/1024.0); // create the ggml context { model.buf.resize(ctx_size); if (use_mlock) { - model.mlock_buf.init (model.buf.addr); + model.mlock_buf.init (model.buf.data); model.mlock_buf.grow_to(model.buf.size); } struct ggml_init_params params = { /*.mem_size =*/ model.buf.size, - /*.mem_buffer =*/ model.buf.addr, + /*.mem_buffer =*/ model.buf.data, /*.no_alloc =*/ ml->use_mmap, }; @@ -1198,9 +1627,7 @@ static void llama_model_load_internal( const uint32_t n_layer = hparams.n_layer; const uint32_t n_vocab = hparams.n_vocab; - ml->ggml_ctx = ctx; - - model.tok_embeddings = ml->get_tensor("tok_embeddings.weight", {n_embd, n_vocab}, GGML_BACKEND_CPU); + model.tok_embeddings = ml->create_tensor(ctx, TN_TOKEN_EMBD, {n_embd, n_vocab}, GGML_BACKEND_CPU); // "output" 
tensor { @@ -1221,8 +1648,8 @@ static void llama_model_load_internal( backend_output = GGML_BACKEND_CPU; } - model.norm = ml->get_tensor("norm.weight", {n_embd}, backend_norm); - model.output = ml->get_tensor("output.weight", {n_embd, n_vocab}, backend_output); + model.norm = ml->create_tensor(ctx, TN_OUTPUT_NORM, {n_embd}, backend_norm); + model.output = ml->create_tensor(ctx, TN_OUTPUT, {n_embd, n_vocab}, backend_output); if (backend_norm == GGML_BACKEND_GPU) { vram_weights += ggml_nbytes(model.norm); } @@ -1231,6 +1658,8 @@ static void llama_model_load_internal( } } + const uint32_t n_ff = hparams.n_ff; + const int i_gpu_start = n_layer - n_gpu_layers; model.layers.resize(n_layer); @@ -1239,21 +1668,18 @@ static void llama_model_load_internal( const ggml_backend backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT auto & layer = model.layers[i]; + layer.attention_norm = ml->create_tensor(ctx, format(TN_ATTN_NORM, i), {n_embd}, backend); - std::string layers_i = "layers." + std::to_string(i); + layer.wq = ml->create_tensor(ctx, format(TN_ATTN_Q, i), {n_embd, n_embd}, backend_split); + layer.wk = ml->create_tensor(ctx, format(TN_ATTN_K, i), {n_embd, n_embd_gqa}, backend_split); + layer.wv = ml->create_tensor(ctx, format(TN_ATTN_V, i), {n_embd, n_embd_gqa}, backend_split); + layer.wo = ml->create_tensor(ctx, format(TN_ATTN_OUTPUT, i), {n_embd, n_embd}, backend_split); - layer.attention_norm = ml->get_tensor(layers_i + ".attention_norm.weight", {n_embd}, backend); + layer.ffn_norm = ml->create_tensor(ctx, format(TN_FFN_NORM, i), {n_embd}, backend); - layer.wq = ml->get_tensor(layers_i + ".attention.wq.weight", {n_embd, n_embd}, backend_split); - layer.wk = ml->get_tensor(layers_i + ".attention.wk.weight", {n_embd, n_embd_gqa}, backend_split); - layer.wv = ml->get_tensor(layers_i + ".attention.wv.weight", {n_embd, n_embd_gqa}, backend_split); - layer.wo = ml->get_tensor(layers_i + ".attention.wo.weight", {n_embd, n_embd}, backend_split); - - layer.ffn_norm = ml->get_tensor(layers_i + ".ffn_norm.weight", {n_embd}, backend); - - layer.w1 = ml->get_tensor(layers_i + ".feed_forward.w1.weight", {n_embd, n_ff}, backend_split); - layer.w2 = ml->get_tensor(layers_i + ".feed_forward.w2.weight", { n_ff, n_embd}, backend_split); - layer.w3 = ml->get_tensor(layers_i + ".feed_forward.w3.weight", {n_embd, n_ff}, backend_split); + layer.w1 = ml->create_tensor(ctx, format(TN_FFN_GATE, i), {n_embd, n_ff}, backend_split); + layer.w2 = ml->create_tensor(ctx, format(TN_FFN_DOWN, i), { n_ff, n_embd}, backend_split); + layer.w3 = ml->create_tensor(ctx, format(TN_FFN_UP, i), {n_embd, n_ff}, backend_split); if (backend == GGML_BACKEND_GPU) { vram_weights += @@ -1351,8 +1777,9 @@ static void llama_model_load_internal( } // populate `tensors_by_name` - for (llama_load_tensor & lt : ml->tensors_map.tensors) { - model.tensors_by_name.emplace_back(lt.name, lt.ggml_tensor); + for (int i = 0; i < ml->n_tensors; ++i) { + struct ggml_tensor * cur = ggml_get_tensor(ctx, ml->get_tensor_name(i)); + model.tensors_by_name.emplace_back(ggml_get_name(cur), cur); } (void) tensor_split; @@ -1362,7 +1789,7 @@ static void llama_model_load_internal( } #endif - ml->load_all_data(progress_callback, progress_callback_user_data, use_mlock ? &model.mlock_mmap : NULL); + ml->load_all_data(ctx, progress_callback, progress_callback_user_data, use_mlock ? 
&model.mlock_mmap : NULL); if (progress_callback) { progress_callback(1.0f, progress_callback_user_data); @@ -1381,8 +1808,6 @@ static bool llama_model_load( llama_vocab & vocab, int n_ctx, int n_batch, - int n_gqa, - float rms_norm_eps, int n_gpu_layers, int main_gpu, const float * tensor_split, @@ -1397,7 +1822,7 @@ static bool llama_model_load( llama_progress_callback progress_callback, void *progress_callback_user_data) { try { - llama_model_load_internal(fname, model, vocab, n_ctx, n_batch, n_gqa, rms_norm_eps, n_gpu_layers, + llama_model_load_internal(fname, model, vocab, n_ctx, n_batch, n_gpu_layers, main_gpu, tensor_split, mul_mat_q, rope_freq_base, rope_freq_scale, low_vram, memory_type, use_mmap, use_mlock, vocab_only, progress_callback, progress_callback_user_data); return true; @@ -1414,7 +1839,7 @@ static struct ggml_cgraph * llama_build_graph( int n_tokens, int n_past) { - LLAMA_ASSERT((!tokens && embd) || (tokens && !embd)); + GGML_ASSERT((!tokens && embd) || (tokens && !embd)); // NOLINT const int N = n_tokens; @@ -1423,7 +1848,7 @@ static struct ggml_cgraph * llama_build_graph( const auto & kv_self = lctx.kv_self; - LLAMA_ASSERT(!!kv_self.ctx); + GGML_ASSERT(!!kv_self.ctx); const int64_t n_embd = hparams.n_embd; const int64_t n_layer = hparams.n_layer; @@ -1433,21 +1858,20 @@ static struct ggml_cgraph * llama_build_graph( const int64_t n_embd_head = hparams.n_embd_head(); const int64_t n_embd_gqa = hparams.n_embd_gqa(); - LLAMA_ASSERT(n_embd_head == hparams.n_rot); + GGML_ASSERT(n_embd_head == hparams.n_rot); - const float freq_base = hparams.rope_freq_base; - const float freq_scale = hparams.rope_freq_scale; - const float rms_norm_eps = hparams.f_rms_norm_eps; + const float freq_base = hparams.rope_freq_base; + const float freq_scale = hparams.rope_freq_scale; + const float norm_rms_eps = hparams.f_norm_rms_eps; const int n_gpu_layers = model.n_gpu_layers; auto & mem_per_token = lctx.mem_per_token; auto & buf_compute = lctx.buf_compute; - struct ggml_init_params params = { /*.mem_size =*/ buf_compute.size, - /*.mem_buffer =*/ buf_compute.addr, + /*.mem_buffer =*/ buf_compute.data, /*.no_alloc =*/ false, }; @@ -1545,7 +1969,7 @@ static struct ggml_cgraph * llama_build_graph( // norm { - cur = ggml_rms_norm(ctx0, inpL, rms_norm_eps); + cur = ggml_rms_norm(ctx0, inpL, norm_rms_eps); offload_func(cur); ggml_set_name(cur, "rms_norm_0"); @@ -1690,7 +2114,7 @@ static struct ggml_cgraph * llama_build_graph( { // norm { - cur = ggml_rms_norm(ctx0, inpFF, rms_norm_eps); + cur = ggml_rms_norm(ctx0, inpFF, norm_rms_eps); offload_func(cur); ggml_set_name(cur, "rms_norm_1"); @@ -1740,7 +2164,7 @@ static struct ggml_cgraph * llama_build_graph( // norm { - cur = ggml_rms_norm(ctx0, inpL, rms_norm_eps); + cur = ggml_rms_norm(ctx0, inpL, norm_rms_eps); offload_func_nr(cur); ggml_set_name(cur, "rms_norm_2"); @@ -1797,14 +2221,14 @@ static bool llama_eval_internal( int n_threads, const char * cgraph_fname) { - LLAMA_ASSERT((!tokens && embd) || (tokens && !embd)); + GGML_ASSERT((!tokens && embd) || (tokens && !embd)); // NOLINT - LLAMA_ASSERT(n_tokens > 0); - LLAMA_ASSERT(n_past >= 0); - LLAMA_ASSERT(n_threads > 0); + GGML_ASSERT(n_tokens > 0); + GGML_ASSERT(n_past >= 0); + GGML_ASSERT(n_threads > 0); // TODO: keep the values of n_batch and n_ctx - // LLAMA_ASSERT(n_tokens <= n_batch); - // LLAMA_ASSERT(n_past + n_tokens <= n_ctx); + // GGML_ASSERT(n_tokens <= n_batch); + // GGML_ASSERT(n_past + n_tokens <= n_ctx); const int64_t t_start_us = ggml_time_us(); @@ -1819,7 +2243,7 @@ static bool 
llama_eval_internal( const auto & kv_self = lctx.kv_self; - LLAMA_ASSERT(!!kv_self.ctx); + GGML_ASSERT(!!kv_self.ctx); const int64_t n_embd = hparams.n_embd; const int64_t n_vocab = hparams.n_vocab; @@ -1843,8 +2267,8 @@ static bool llama_eval_internal( struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1]; struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2]; - LLAMA_ASSERT(strcmp(res->name, "result_output") == 0); - LLAMA_ASSERT(strcmp(embeddings->name, "result_norm") == 0); + GGML_ASSERT(strcmp(res->name, "result_output") == 0); + GGML_ASSERT(strcmp(embeddings->name, "result_norm") == 0); #if GGML_USE_MPI const int64_t n_layer = hparams.n_layer; @@ -1927,6 +2351,89 @@ static bool llama_eval_internal( // tokenizer // +static enum llama_vocab_type llama_vocab_get_type(const llama_vocab & vocab) { + return vocab.type; +} + +static bool llama_is_normal_token(const llama_vocab & vocab, llama_token id) { + return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_NORMAL; +} + +static bool llama_is_unknown_token(const llama_vocab & vocab, llama_token id) { + return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_UNKNOWN; +} + +static bool llama_is_control_token(const llama_vocab & vocab, llama_token id) { + return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_CONTROL; +} + +static bool llama_is_user_defined_token(const llama_vocab & vocab, llama_token id) { + return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_USER_DEFINED; +} + +static bool llama_is_unused_token(const llama_vocab & vocab, llama_token id) { + return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_UNUSED; +} + +static bool llama_is_byte_token(const llama_vocab & vocab, llama_token id) { + return vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_BYTE; +} + +static bool llama_is_bos_token(const llama_vocab & vocab, llama_token id) { + GGML_ASSERT(llama_is_control_token(vocab, id)); + return id == vocab.special_bos_id; +} + +static bool llama_is_eos_token(const llama_vocab & vocab, llama_token id ) { + GGML_ASSERT(llama_is_control_token(vocab, id)); + return id == vocab.special_eos_id; +} + +static bool llama_is_pad_token(const llama_vocab & vocab, llama_token id ) { + GGML_ASSERT(id < 0 || llama_is_control_token(vocab, id)); + return id == vocab.special_pad_id; +} + +static uint8_t llama_token_to_byte(const llama_vocab & vocab, llama_token id) { + GGML_ASSERT(llama_is_byte_token(vocab, id)); + const auto& token_data = vocab.id_to_token.at(id); + auto buf = token_data.text.substr(3, 2); + return strtol(buf.c_str(), NULL, 16); +} + +static llama_token llama_byte_to_token(const llama_vocab & vocab, uint8_t ch) { + char buf[7]; + int result = snprintf(buf, sizeof(buf), "<0x%02X>", ch); + GGML_ASSERT(0 <= result && result < 7); + return vocab.token_to_id.at(buf); +} + +static std::string llama_escape_whitespace(const std::string& text) { + std::string result; + bool escaping = false; + result += "\xe2\x96\x81"; + for (size_t offs = 0; offs < text.length(); ++offs) { + if (text[offs] == ' ') { + if (!escaping) { + result += "\xe2\x96\x81"; + escaping = true; + } + } + else { + escaping = false; + result += text[offs]; + } + } + return result; +} + +static std::string llama_unescape_whitespace(const std::string& word) { + if (word.length() >= 3 && word.substr(0, 3) == "\xe2\x96\x81") { + return std::string(" ") + word.substr(3); + } + return word; +} + static size_t utf8_len(char src) { const size_t lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 }; uint8_t highbits = static_cast<uint8_t>(src) >> 4; @@ -1968,10 +2475,11 @@ struct
llama_tokenizer { size_t offs = 0; while (offs < text.size()) { llama_sp_symbol sym; - size_t char_len = std::min(text.size() - offs, utf8_len(text[offs])); + size_t len = utf8_len(text[offs]); + GGML_ASSERT(offs + len <= text.size()); sym.text = text.c_str() + offs; - sym.n = char_len; - offs += char_len; + sym.n = len; + offs += len; sym.prev = index - 1; sym.next = offs == text.size() ? -1 : index + 1; index++; @@ -2016,23 +2524,36 @@ struct llama_tokenizer { for (int i = 0; i != -1; i = symbols_[i].next) { auto & symbol = symbols_[i]; - auto token = vocab_.token_to_id.find(std::string(symbol.text, symbol.n)); - - if (token == vocab_.token_to_id.end()) { - // output any symbols that did not form tokens as bytes. - for (int j = 0; j < (int) symbol.n; ++j) { - // NOTE: old version, before #2420 - not sure what are the implications of this - //llama_vocab::id token_id = static_cast<uint8_t>(symbol.text[j]) + 3; - llama_vocab::id token_id = vocab_.token_to_id.at(std::string(1, symbol.text[j])); - output.push_back(token_id); - } - } else { - output.push_back((*token).second); - } + resegment(symbol, output); } } private: + void resegment(llama_sp_symbol &symbol, std::vector<llama_vocab::id> &output) { + auto text = std::string(symbol.text, symbol.n); + auto token = vocab_.token_to_id.find(text); + + // Do we need to support is_unused? + if (token != vocab_.token_to_id.end()) { + output.push_back((*token).second); + return; + } + + const auto p = rev_merge.find(text); + + if (p == rev_merge.end()) { + // output any symbols that did not form tokens as bytes. + for (int j = 0; j < (int)symbol.n; ++j) { + llama_vocab::id token_id = llama_byte_to_token(vocab_, symbol.text[j]); + output.push_back(token_id); + } + return; + } + + resegment(symbols_[p->second.first], output); + resegment(symbols_[p->second.second], output); + } + void try_add_bigram(int left, int right) { if (left == -1 || right == -1) { return; } @@ -2049,31 +2570,42 @@ private: return; } - const auto &tok_score = vocab_.id_to_token[(*token).second]; + const auto &tok_data = vocab_.id_to_token[(*token).second]; llama_sp_bigram bigram; bigram.left = left; bigram.right = right; - bigram.score = tok_score.score; + bigram.score = tok_data.score; bigram.size = text.size(); work_queue_.push(bigram); + + // Do we need to support is_unused?
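+ // remember which pair of symbols produced this merged text, so that resegment() can recursively split it back into vocab tokens (or bytes)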
+ rev_merge[text] = std::make_pair(left, right); } const llama_vocab & vocab_; std::vector<llama_sp_symbol> symbols_; llama_sp_bigram::queue work_queue_; + std::map<std::string, std::pair<int, int>> rev_merge; }; -static std::vector<llama_vocab::id> llama_tokenize(const llama_vocab & vocab, const std::string & text, bool bos) { +static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab & vocab, const std::string & raw_text, bool bos, bool escape) { llama_tokenizer tokenizer(vocab); std::vector<llama_vocab::id> output; - if (text.empty()) { + if (raw_text.empty()) { return output; } if (bos) { - output.push_back(llama_token_bos()); + output.push_back(vocab.special_bos_id); + } + + std::string text; + if (escape) { + text = llama_escape_whitespace(raw_text); + } else { + text = raw_text; } tokenizer.tokenize(text, output); @@ -2164,8 +2696,8 @@ std::pair<std::vector<uint32_t>, llama_partial_utf8> decode_utf8( // returns true iff pos points to the end of one of the definitions of a rule static bool llama_grammar_is_end_of_sequence(const llama_grammar_element * pos) { switch (pos->type) { - case LLAMA_GRETYPE_END: return true; - case LLAMA_GRETYPE_ALT: return true; + case LLAMA_GRETYPE_END: return true; // NOLINT + case LLAMA_GRETYPE_ALT: return true; // NOLINT default: return false; } } @@ -2178,7 +2710,8 @@ static std::pair<bool, const llama_grammar_element *> llama_grammar_match_char( bool found = false; bool is_positive_char = pos->type == LLAMA_GRETYPE_CHAR; - LLAMA_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT); + + GGML_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT); // NOLINT do { if (pos[1].type == LLAMA_GRETYPE_CHAR_RNG_UPPER) { @@ -2203,7 +2736,7 @@ static bool llama_grammar_match_partial_char( const llama_partial_utf8 partial_utf8) { bool is_positive_char = pos->type == LLAMA_GRETYPE_CHAR; - LLAMA_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT); + GGML_ASSERT(is_positive_char || pos->type == LLAMA_GRETYPE_CHAR_NOT); uint32_t partial_value = partial_utf8.value; int n_remain = partial_utf8.n_remain; @@ -2296,7 +2829,7 @@ static void llama_grammar_advance_stack( // end of alternate (LLAMA_GRETYPE_END, LLAMA_GRETYPE_ALT) or middle of char range // (LLAMA_GRETYPE_CHAR_ALT, LLAMA_GRETYPE_CHAR_RNG_UPPER); stack should never be left on // those - LLAMA_ASSERT(false); + GGML_ASSERT(false); } } @@ -2371,7 +2904,7 @@ static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates_for_ } } - auto stack_pos_after = llama_grammar_match_char(stack_pos, 0).second; + const auto * stack_pos_after = llama_grammar_match_char(stack_pos, 0).second; // update top of stack to next element, if any std::vector<const llama_grammar_element *> stack_after(stack.begin(), stack.end() - 1); @@ -2393,7 +2926,7 @@ static std::vector<llama_grammar_candidate> llama_grammar_reject_candidates( const std::vector<std::vector<llama_grammar_element>> & rules, const std::vector<std::vector<const llama_grammar_element *>> & stacks, const std::vector<llama_grammar_candidate> & candidates) { - LLAMA_ASSERT(!stacks.empty()); // REVIEW + GGML_ASSERT(!stacks.empty()); // REVIEW if (candidates.empty()) { return std::vector<llama_grammar_candidate>(); @@ -2460,7 +2993,7 @@ void llama_grammar_free(struct llama_grammar * grammar) { // void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates) { - assert(candidates->size > 0); + GGML_ASSERT(candidates->size > 0); const int64_t t_start_sample_us = ggml_time_us(); @@ -2604,7 +3137,6 @@ void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array * } } - void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep) { // Reference implementation: // https://github.com/huggingface/transformers/compare/main...cimeister:typical-sampling:typical-pr @@ -2741,7 +3273,7 @@
void llama_sample_frequency_and_presence_penalties(struct llama_context * ctx, l } void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * candidates, const struct llama_grammar * grammar) { - assert(ctx); + GGML_ASSERT(ctx); const int64_t t_start_sample_us = ggml_time_us(); bool allow_eos = false; @@ -2752,31 +3284,28 @@ void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * c } } - const llama_token eos = llama_token_eos(); + const llama_token eos = llama_token_eos(ctx); std::vector<std::pair<std::vector<uint32_t>, llama_partial_utf8>> candidates_decoded; std::vector<llama_grammar_candidate> candidates_grammar; for (size_t i = 0; i < candidates->size; ++i) { - const llama_token id = candidates->data[i].id; - const char * str = llama_token_to_str(ctx, id); + const llama_token id = candidates->data[i].id; + const std::string text = llama_token_to_text(ctx, id); if (id == eos) { if (!allow_eos) { candidates->data[i].logit = -INFINITY; } - } else if (*str == 0) { + } else if (text.empty()) { candidates->data[i].logit = -INFINITY; } else { - candidates_decoded.push_back(decode_utf8(str, grammar->partial_utf8)); - candidates_grammar.push_back({ - i, candidates_decoded.back().first.data(), candidates_decoded.back().second - }); + candidates_decoded.push_back(decode_utf8(text.c_str(), grammar->partial_utf8)); + candidates_grammar.push_back({ i, candidates_decoded.back().first.data(), candidates_decoded.back().second }); } } - const auto rejects = - llama_grammar_reject_candidates(grammar->rules, grammar->stacks, candidates_grammar); - for (auto & reject : rejects) { + const auto rejects = llama_grammar_reject_candidates(grammar->rules, grammar->stacks, candidates_grammar); + for (const auto & reject : rejects) { candidates->data[reject.index].logit = -INFINITY; } @@ -2804,10 +3333,12 @@ void llama_sample_classifier_free_guidance( float scale) { int64_t t_start_sample_us = ggml_time_us(); - assert(ctx); + GGML_ASSERT(ctx); + auto n_vocab = llama_n_vocab(ctx); - assert(n_vocab == (int)candidates->size); - assert(!candidates->sorted); + + GGML_ASSERT(n_vocab == (int)candidates->size); + GGML_ASSERT(!candidates->sorted); std::vector<float> logits_base; logits_base.reserve(candidates->size); @@ -2831,7 +3362,8 @@ void llama_sample_classifier_free_guidance( } llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, int m, float * mu) { - assert(ctx); + GGML_ASSERT(ctx); + auto N = float(llama_n_vocab(ctx)); int64_t t_start_sample_us; t_start_sample_us = ggml_time_us(); @@ -2937,7 +3469,8 @@ llama_token llama_sample_token_greedy(struct llama_context * ctx, llama_token_da } llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates) { - assert(ctx); + GGML_ASSERT(ctx); + const int64_t t_start_sample_us = ggml_time_us(); llama_sample_softmax(nullptr, candidates); @@ -2961,25 +3494,25 @@ llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_arra void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar * grammar, llama_token token) { const int64_t t_start_sample_us = ggml_time_us(); - if (token == llama_token_eos()) { for (const auto & stack : grammar->stacks) { if (stack.empty()) { return; } } - LLAMA_ASSERT(false); + GGML_ASSERT(false); } - const char * str = llama_token_to_str(ctx, token); + const std::string text = llama_token_to_text(ctx, token); // Note terminating 0 in decoded string - const auto decoded = decode_utf8(str,
grammar->partial_utf8); + const auto decoded = decode_utf8(text.c_str(), grammar->partial_utf8); const auto & code_points = decoded.first; for (auto it = code_points.begin(), end = code_points.end() - 1; it != end; ++it) { grammar->stacks = llama_grammar_accept(grammar->rules, grammar->stacks, *it); } grammar->partial_utf8 = decoded.second; - LLAMA_ASSERT(!grammar->stacks.empty()); + GGML_ASSERT(!grammar->stacks.empty()); ctx->t_sample_us += ggml_time_us() - t_start_sample_us; } @@ -2988,37 +3521,37 @@ void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar // quantization // -static void llama_convert_tensor_internal(const llama_load_tensor & tensor, llama_buffer & output, const int nelements, const int nthread) { - if (output.size < nelements * sizeof(float)) { - output.resize(nelements * sizeof(float)); +static void llama_convert_tensor_internal(struct ggml_tensor * tensor, std::vector<float> & output, const size_t nelements, const int nthread) { + if (output.size() < nelements) { + output.resize(nelements); } - float * f32_output = (float *) output.addr; + float * f32_output = (float *) output.data(); ggml_type_traits_t qtype; - if (ggml_is_quantized(tensor.type)) { - qtype = ggml_internal_get_type_traits(tensor.type); + if (ggml_is_quantized(tensor->type)) { + qtype = ggml_internal_get_type_traits(tensor->type); if (qtype.to_float == NULL) { - throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor.type))); + throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor->type))); } - } else if (tensor.type != GGML_TYPE_F16) { - throw std::runtime_error(format("cannot dequantize/convert tensor type %s", ggml_type_name(tensor.type))); + } else if (tensor->type != GGML_TYPE_F16) { + throw std::runtime_error(format("cannot dequantize/convert tensor type %s", ggml_type_name(tensor->type))); } if (nthread < 2) { - if (tensor.type == GGML_TYPE_F16) { - ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor.data, f32_output, nelements); - } else if (ggml_is_quantized(tensor.type)) { - qtype.to_float(tensor.data, f32_output, nelements); + if (tensor->type == GGML_TYPE_F16) { + ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor->data, f32_output, nelements); + } else if (ggml_is_quantized(tensor->type)) { + qtype.to_float(tensor->data, f32_output, nelements); } else { - LLAMA_ASSERT(false); // unreachable + GGML_ASSERT(false); // unreachable } return; } - auto block_size = tensor.type == GGML_TYPE_F16 ? 1 : (size_t)ggml_blck_size(tensor.type); - auto block_size_bytes = ggml_type_size(tensor.type); + auto block_size = tensor->type == GGML_TYPE_F16 ?
1 : (size_t)ggml_blck_size(tensor->type); + auto block_size_bytes = ggml_type_size(tensor->type); - LLAMA_ASSERT(nelements % block_size == 0); + GGML_ASSERT(nelements % block_size == 0); auto nblocks = nelements / block_size; auto blocks_per_thread = nblocks / nthread; auto spare_blocks = nblocks - (blocks_per_thread * nthread); // if blocks aren't divisible by thread count @@ -3036,20 +3569,18 @@ static void llama_convert_tensor_internal(const llama_load_tensor & tensor, llam qtype.to_float(inbuf, outbuf, nels); } }; - workers.push_back(std::thread(compute, tensor.type, tensor.data + in_buff_offs, f32_output + out_buff_offs, thr_elems)); + workers.push_back(std::thread(compute, tensor->type, (uint8_t *) tensor->data + in_buff_offs, f32_output + out_buff_offs, thr_elems)); in_buff_offs += thr_block_bytes; out_buff_offs += thr_elems; } for (auto & worker : workers) { worker.join(); } - } static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) { ggml_type quantized_type; llama_ftype ftype = params->ftype; - int nthread = params->nthread; switch (params->ftype) { case LLAMA_FTYPE_MOSTLY_Q4_0: quantized_type = GGML_TYPE_Q4_0; break; @@ -3075,21 +3606,35 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s default: throw std::runtime_error(format("invalid output file type %d\n", ftype)); } + int nthread = params->nthread; + + if (nthread <= 0) { nthread = std::thread::hardware_concurrency(); } std::unique_ptr<llama_model_loader> model_loader(new llama_model_loader(fname_inp, /*use_mmap*/ false)); - llama_file_saver file_saver(fname_out.c_str(), model_loader->file_loader.get(), params->ftype); + + const size_t align = GGUF_DEFAULT_ALIGNMENT; + struct gguf_context * ctx_out = gguf_init_empty(); + + // copy the KV pairs from the input file + gguf_set_kv (ctx_out, model_loader->ctx_gguf); + gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION); #ifdef GGML_USE_K_QUANTS int n_attention_wv = 0; int n_feed_forward_w2 = 0; - for (auto& tensor : model_loader->tensors_map.tensors) { - if (tensor.name.find("attention.wv.weight") != std::string::npos) { + + for (int i = 0; i < model_loader->n_tensors; ++i) { + struct ggml_tensor * meta = model_loader->get_tensor_meta(i); + + const std::string name = ggml_get_name(meta); + + // TODO: avoid hardcoded tensor names - use the TN_* constants + if (name.find("attn_v.weight") != std::string::npos) { ++n_attention_wv; } - else if (tensor.name.find("feed_forward.w2.weight") != std::string::npos) { + else if (name.find("ffn_down.weight") != std::string::npos) { ++n_feed_forward_w2; } } @@ -3109,46 +3654,69 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s return i_layer < num_layers/8 || i_layer >= 7*num_layers/8 || (i_layer - num_layers/8)%3 == 2; }; - size_t idx = 0; - for (llama_load_tensor & tensor : model_loader->tensors_map.tensors) { - llama_buffer read_data; - read_data.resize(tensor.size); - tensor.data = read_data.addr; + int idx = 0; + + std::vector<uint8_t> read_data; + std::vector<uint8_t> work; + + // populate the original tensors so we get an initial meta data + for (int i = 0; i < model_loader->n_tensors; ++i) { + struct ggml_tensor * meta = model_loader->get_tensor_meta(i); + gguf_add_tensor(ctx_out, meta); + } + + std::ofstream fout(fname_out, std::ios::binary); + + const size_t meta_size = gguf_get_meta_size(ctx_out); + + LLAMA_LOG_INFO("%s: meta size = %zu bytes\n", __func__, meta_size); + + // placeholder for
the meta data + ::zeros(fout, meta_size); + + for (int i = 0; i < model_loader->n_tensors; ++i) { + struct ggml_tensor * tensor = model_loader->get_tensor_meta(i); + + const std::string name = ggml_get_name(tensor); + + read_data.resize(ggml_nbytes(tensor)); + tensor->data = read_data.data(); model_loader->load_data_for(tensor); - LLAMA_LOG_INFO("[%4zu/%4zu] %36s - %16s, type = %6s, ", - ++idx, model_loader->tensors_map.tensors.size(), - tensor.name.c_str(), llama_format_tensor_shape(tensor.ne).c_str(), - ggml_type_name(tensor.type)); + LLAMA_LOG_INFO("[%4d/%4d] %36s - [%s], type = %6s, ", + ++idx, model_loader->n_tensors, + ggml_get_name(tensor), + llama_format_tensor_shape(tensor).c_str(), + ggml_type_name(tensor->type)); // This used to be a regex, but has an extreme cost to compile times. - bool quantize = tensor.name.rfind("weight") == tensor.name.size() - 6; // ends with 'weight'? + bool quantize = name.rfind("weight") == name.size() - 6; // ends with 'weight'? // quantize only 2D tensors - quantize &= (tensor.ne.size() == 2); - quantize &= params->quantize_output_tensor || tensor.name != "output.weight"; - quantize &= quantized_type != tensor.type; + quantize &= (tensor->n_dims == 2); + quantize &= params->quantize_output_tensor || name != "output.weight"; + quantize &= quantized_type != tensor->type; enum ggml_type new_type; void * new_data; size_t new_size; - llama_buffer work; if (!quantize) { - new_type = tensor.type; - new_data = tensor.data; - new_size = tensor.size; - LLAMA_LOG_INFO("size = %8.3f MB\n", tensor.size/1024.0/1024.0); + new_type = tensor->type; + new_data = tensor->data; + new_size = ggml_nbytes(tensor); + LLAMA_LOG_INFO("size = %8.3f MB\n", ggml_nbytes(tensor)/1024.0/1024.0); } else { new_type = quantized_type; #ifdef GGML_USE_K_QUANTS - if (tensor.name == "output.weight") { - int nx = tensor.ne.at(0); - int ny = tensor.ne.at(1); + // TODO: avoid hardcoded tensor names - use the TN_* constants + if (name == TN_OUTPUT) { + int nx = tensor->ne[0]; + int ny = tensor->ne[1]; if (nx % QK_K == 0 && ny % QK_K == 0) { new_type = GGML_TYPE_Q6_K; } - } else if (tensor.name.find("attention.wv.weight") != std::string::npos) { + } else if (name.find("attn_v.weight") != std::string::npos) { if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K; else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K; else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) && @@ -3156,32 +3724,32 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s else if (QK_K == 64 && (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S) && (i_attention_wv < n_attention_wv/8 || i_attention_wv >= 7*n_attention_wv/8)) new_type = GGML_TYPE_Q6_K; ++i_attention_wv; - } else if (tensor.name.find("feed_forward.w2.weight") != std::string::npos) { + } else if (name.find("ffn_down.weight") != std::string::npos) { if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K; else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K; else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) && use_more_bits(i_feed_forward_w2, n_feed_forward_w2)) new_type = GGML_TYPE_Q6_K; //else if (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S && i_feed_forward_w2 < n_feed_forward_w2/8) new_type = GGML_TYPE_Q6_K; ++i_feed_forward_w2; - } else if (tensor.name.find("attention.wo.weight") != std::string::npos) { + } else if 
(name.find("attn_output.weight") != std::string::npos) { if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K; else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K; } bool convert_incompatible_tensor = false; if (new_type == GGML_TYPE_Q2_K || new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_Q4_K || new_type == GGML_TYPE_Q5_K || new_type == GGML_TYPE_Q6_K) { - int nx = tensor.ne.at(0); - int ny = tensor.ne.at(1); + int nx = tensor->ne[0]; + int ny = tensor->ne[1]; if (nx % QK_K != 0 || ny % QK_K != 0) { LLAMA_LOG_INFO("\n\nTensor sizes %d x %d are not divisible by %d, required for k-quants.\n",nx,ny,QK_K); convert_incompatible_tensor = true; } } if (convert_incompatible_tensor) { - if (tensor.name == "output.weight") { + if (name == TN_OUTPUT) { new_type = GGML_TYPE_F16; //fall back to F16 instead of just failing. LLAMA_LOG_WARN("F16 will be used for this tensor instead.\n"); - } else if (tensor.name == "tok_embeddings.weight") { + } else if (name == TN_TOKEN_EMBD) { new_type = GGML_TYPE_Q4_0; //fall back to Q4_0 instead of just failing. LLAMA_LOG_WARN("Q4_0 will be used for this tensor instead.\n"); } else { @@ -3190,27 +3758,28 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s } #endif - float * f32_data; - size_t nelements = tensor.ne.at(0) * tensor.ne.at(1); - llama_buffer f32_conv_buf; + const size_t nelements = ggml_nelements(tensor); - if (tensor.type == GGML_TYPE_F32) { - f32_data = (float *) tensor.data; - } else if (ggml_is_quantized(tensor.type) && !params->allow_requantize) { - throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor.type))); + float * f32_data; + std::vector f32_conv_buf; + + if (tensor->type == GGML_TYPE_F32) { + f32_data = (float *) tensor->data; + } else if (ggml_is_quantized(tensor->type) && !params->allow_requantize) { + throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor->type))); } else { llama_convert_tensor_internal(tensor, f32_conv_buf, nelements, nthread); - f32_data = (float *) f32_conv_buf.addr; + f32_data = (float *) f32_conv_buf.data(); } LLAMA_LOG_INFO("quantizing to %s .. ", ggml_type_name(new_type)); fflush(stdout); work.resize(nelements * 4); // upper bound on size - new_data = work.addr; + new_data = work.data(); std::vector hist_cur(1 << 4, 0); - int chunk_size = 32 * 512; + static const int chunk_size = 32 * 512; const int nchunk = (nelements + chunk_size - 1)/chunk_size; const int nthread_use = nthread > 1 ? 
std::max(1, std::min(nthread, nchunk)) : 1; if (nthread_use < 2) { @@ -3218,7 +3787,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s } else { size_t counter = 0; new_size = 0; - auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, nelements, chunk_size] () { + auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, nelements]() { std::vector<int64_t> local_hist; size_t local_size = 0; while (true) { @@ -3253,7 +3822,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s } } - LLAMA_LOG_INFO("size = %8.2f MB -> %8.2f MB | hist: ", tensor.size/1024.0/1024.0, new_size/1024.0/1024.0); + LLAMA_LOG_INFO("size = %8.2f MB -> %8.2f MB | hist: ", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0); int64_t tot_count = 0; for (size_t i = 0; i < hist_cur.size(); i++) { hist_all[i] += hist_cur[i]; @@ -3267,14 +3836,34 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s } LLAMA_LOG_INFO("\n"); } - total_size_org += tensor.size; + total_size_org += ggml_nbytes(tensor); total_size_new += new_size; - file_saver.write_tensor(tensor, new_type, new_data, new_size); + + // update the gguf meta data as we go + gguf_set_tensor_type(ctx_out, name.c_str(), new_type); + gguf_set_tensor_data(ctx_out, name.c_str(), new_data, new_size); + + // write tensor data + padding + fout.write((const char *) new_data, new_size); + zeros(fout, GGML_PAD(new_size, align) - new_size); } + // go back to beginning of file and write the updated meta data + { + fout.seekp(0); + std::vector<uint8_t> data(gguf_get_meta_size(ctx_out)); + gguf_get_meta_data(ctx_out, data.data()); + fout.write((const char *) data.data(), data.size()); + } + + fout.close(); + + gguf_free(ctx_out); + LLAMA_LOG_INFO("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0); LLAMA_LOG_INFO("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0); + // print histogram for all tensors { int64_t sum_all = 0; for (size_t i = 0; i < hist_all.size(); i++) { @@ -3291,238 +3880,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s } } - - -// -// interface implementation -// - -struct llama_model * llama_load_model_from_file( - const char * path_model, - struct llama_context_params params) { - ggml_time_init(); - - llama_model * model = new llama_model; - - ggml_type memory_type = params.f16_kv ?
GGML_TYPE_F16 : GGML_TYPE_F32; - - if (!llama_model_load(path_model, *model, model->vocab, params.n_ctx, params.n_batch, params.n_gqa, params.rms_norm_eps, params.n_gpu_layers, - params.main_gpu, params.tensor_split, params.mul_mat_q, params.rope_freq_base, params.rope_freq_scale,params.low_vram, - memory_type, params.use_mmap, params.use_mlock, params.vocab_only, params.progress_callback, - params.progress_callback_user_data)) { - LLAMA_LOG_ERROR("%s: failed to load model\n", __func__); - delete model; - return nullptr; - } - - return model; -} - -void llama_free_model(struct llama_model * model) { - delete model; -} - -struct llama_context * llama_new_context_with_model( - struct llama_model * model, - struct llama_context_params params) { - - if (!model) { - return nullptr; - } - - llama_context * ctx = new llama_context(*model); - - if (params.seed == LLAMA_DEFAULT_SEED) { - params.seed = time(NULL); - } - - unsigned cur_percentage = 0; - if (params.progress_callback == NULL) { - params.progress_callback_user_data = &cur_percentage; - params.progress_callback = [](float progress, void * ctx) { - unsigned * cur_percentage_p = (unsigned *) ctx; - unsigned percentage = (unsigned) (100 * progress); - while (percentage > *cur_percentage_p) { - *cur_percentage_p = percentage; - LLAMA_LOG_INFO("."); - if (percentage >= 100) { - LLAMA_LOG_INFO("\n"); - } - } - }; - } - - ctx->rng = std::mt19937(params.seed); - ctx->logits_all = params.logits_all; - - ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32; - - // reserve memory for context buffers - if (!params.vocab_only) { - if (!kv_cache_init(ctx->model.hparams, ctx->kv_self, memory_type, ctx->model.hparams.n_ctx, params.n_gpu_layers)) { - LLAMA_LOG_ERROR("%s: kv_cache_init() failed for self-attention cache\n", __func__); - llama_free(ctx); - return nullptr; - } - - { - const size_t memory_size = ggml_nbytes(ctx->kv_self.k) + ggml_nbytes(ctx->kv_self.v); - LLAMA_LOG_INFO("%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0); - } - - const auto & hparams = ctx->model.hparams; - - // resized during inference - if (params.logits_all) { - ctx->logits.reserve(hparams.n_ctx*hparams.n_vocab); - } else { - ctx->logits.reserve(hparams.n_vocab); - } - - if (params.embedding){ - ctx->embedding.resize(hparams.n_embd); - } - -#ifdef LLAMA_USE_ALLOCATOR - { - static const size_t tensor_alignment = 32; - // the compute buffer is used to store the tensor and graph structs, while the allocator buffer is used for the tensor data - ctx->buf_compute.resize(ggml_tensor_overhead()*GGML_MAX_NODES + ggml_graph_overhead()); - - // create measure allocator - ctx->alloc = ggml_allocr_new_measure(tensor_alignment); - - // build worst-case graph - int n_tokens = std::min((int)hparams.n_ctx, params.n_batch); - int n_past = hparams.n_ctx - n_tokens; - llama_token token = llama_token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph - ggml_cgraph * gf = llama_build_graph(*ctx, &token, NULL, n_tokens, n_past); -#ifdef GGML_USE_METAL - if (params.n_gpu_layers > 0) { - ctx->ctx_metal = ggml_metal_init(1); - if (!ctx->ctx_metal) { - LLAMA_LOG_ERROR("%s: ggml_metal_init() failed\n", __func__); - llama_free(ctx); - return NULL; - } - ggml_metal_graph_find_concurrency(ctx->ctx_metal, gf, false); - ggml_allocr_set_parse_seq(ctx->alloc, ggml_metal_get_concur_list(ctx->ctx_metal), ggml_metal_if_optimized(ctx->ctx_metal)); - } -#endif - // measure memory requirements for the graph - 
size_t alloc_size = ggml_allocr_alloc_graph(ctx->alloc, gf) + tensor_alignment; - - LLAMA_LOG_INFO("%s: compute buffer total size = %7.2f MB\n", __func__, (ctx->buf_compute.size + alloc_size) / 1024.0 / 1024.0); - - // debug - for comparison with scratch buffer - //size_t prev_req = - // MEM_REQ_SCRATCH0(hparams.n_ctx).at(ctx->model.type) + - // MEM_REQ_SCRATCH1().at(ctx->model.type) + - // MEM_REQ_EVAL().at(ctx->model.type); - //LLAMA_LOG_INFO("%s: (debug) equivalent with scratch buffer = %7.2f MB\n", __func__, prev_req / 1024.0 / 1024.0); - - // recreate allocator with exact memory requirements - ggml_allocr_free(ctx->alloc); - - ctx->buf_alloc.resize(alloc_size); - ctx->alloc = ggml_allocr_new(ctx->buf_alloc.addr, ctx->buf_alloc.size, tensor_alignment); -#ifdef GGML_USE_METAL - if (ctx->ctx_metal) { - ggml_allocr_set_parse_seq(ctx->alloc, ggml_metal_get_concur_list(ctx->ctx_metal), ggml_metal_if_optimized(ctx->ctx_metal)); - } -#endif - } -#else - ctx->buf_compute.resize(MEM_REQ_EVAL().at(ctx->model.type) + ggml_graph_overhead()); -#endif - -#ifdef LLAMA_USE_SCRATCH - ctx->buf_scratch[0].resize(MEM_REQ_SCRATCH0(hparams.n_ctx).at(ctx->model.type)); - ctx->buf_scratch[1].resize(MEM_REQ_SCRATCH1().at(ctx->model.type)); -#endif - } - -#ifdef GGML_USE_METAL - if (params.n_gpu_layers > 0) { - // this allocates all Metal resources and memory buffers - - void * data_ptr = NULL; - size_t data_size = 0; - - if (params.use_mmap) { - data_ptr = ctx->model.mapping->addr; - data_size = ctx->model.mapping->size; - } else { - data_ptr = ggml_get_mem_buffer(ctx->model.ctx); - data_size = ggml_get_mem_size (ctx->model.ctx); - } - - const size_t max_size = ggml_get_max_tensor_size(ctx->model.ctx); - - LLAMA_LOG_INFO("%s: max tensor size = %8.2f MB\n", __func__, max_size/1024.0/1024.0); - -#define LLAMA_METAL_CHECK_BUF(result) \ - if (!(result)) { \ - LLAMA_LOG_ERROR("%s: failed to add buffer\n", __func__); \ - llama_free(ctx); \ - return NULL; \ - } - - LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "data", data_ptr, data_size, max_size)); - - LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "eval", ctx->buf_compute.addr, ctx->buf_compute.size, 0)); - LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "kv", ctx->kv_self.buf.addr, ctx->kv_self.buf.size, 0)); - - LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "alloc", ctx->buf_alloc.addr, ctx->buf_alloc.size, 0)); -#undef LLAMA_METAL_CHECK_BUF - } -#endif - -#ifdef GGML_USE_MPI - ctx->ctx_mpi = ggml_mpi_init(); - - if (ggml_mpi_rank(ctx->ctx_mpi) > 0) { - // Enter a blocking eval loop with dummy input, letting rank=0 drive the process - const std::vector<llama_token> tmp(ctx->model.hparams.n_ctx, llama_token_bos()); - while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {}; - llama_backend_free(); - exit(1); - } -#endif - - return ctx; -} - -struct llama_context * llama_init_from_file( - const char * path_model, - struct llama_context_params params) { - - struct llama_model * model = llama_load_model_from_file(path_model, params); - if (!model) { - return nullptr; - } - struct llama_context * ctx = llama_new_context_with_model(model, params); - ctx->model_owner = true; - return ctx; -} - -void llama_free(struct llama_context * ctx) { - delete ctx; -} - -int llama_model_quantize( - const char * fname_inp, - const char * fname_out, - const llama_model_quantize_params *params) { - try { - llama_model_quantize_internal(fname_inp, fname_out, params); - return 0; - } catch (const std::exception & err) { - LLAMA_LOG_ERROR("%s:
failed to quantize: %s\n", __func__, err.what()); - return 1; - } -} - +// TODO: after the GGUF PR, this likely won't work and needs to be updated int llama_apply_lora_from_file_internal(const struct llama_model & model, const char * path_lora, const char * path_base_model, int n_threads) { LLAMA_LOG_INFO("%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora); @@ -3538,10 +3896,6 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const { uint32_t magic; fin.read((char *) &magic, sizeof(magic)); - if (magic != LLAMA_FILE_MAGIC_GGLA) { - LLAMA_LOG_ERROR("%s: bad file magic\n", __func__); - return 1; - } uint32_t format_version; fin.read((char *) &format_version, sizeof(format_version)); @@ -3559,7 +3913,6 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const LLAMA_LOG_INFO("%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling); - // create a temporary ggml context to store the lora tensors // todo: calculate size from biggest possible tensor std::vector<uint8_t> lora_buf(1024ull * 1024ull * 1024ull); @@ -3573,36 +3926,33 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const // create a name -> tensor map of the model to accelerate lookups std::unordered_map<std::string, struct ggml_tensor *> model_tensors; - for (const auto & kv: model.tensors_by_name) { + for (const auto & kv : model.tensors_by_name) { model_tensors.insert(kv); } - // load base model std::unique_ptr<llama_model_loader> model_loader; ggml_context * base_ctx = NULL; - llama_buffer base_buf; + std::vector<uint8_t> base_buf; if (path_base_model) { LLAMA_LOG_INFO("%s: loading base model from '%s'\n", __func__, path_base_model); model_loader.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true)); size_t ctx_size; size_t mmapped_size; - model_loader->calc_sizes(&ctx_size, &mmapped_size); + model_loader->calc_sizes(ctx_size, mmapped_size); base_buf.resize(ctx_size); ggml_init_params base_params; - base_params.mem_size = base_buf.size; - base_params.mem_buffer = base_buf.addr; + base_params.mem_size = base_buf.size(); + base_params.mem_buffer = base_buf.data(); base_params.no_alloc = model_loader->use_mmap; base_ctx = ggml_init(base_params); - model_loader->ggml_ctx = base_ctx; - // maybe this should in llama_model_loader if (model_loader->use_mmap) { - model_loader->mapping.reset(new llama_mmap(&model_loader->file_loader->file, /* prefetch */ 0, ggml_is_numa())); + model_loader->mapping.reset(new llama_mmap(&model_loader->file, /* prefetch */ 0, ggml_is_numa())); } } @@ -3707,19 +4057,18 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const ggml_tensor * base_t; if (model_loader) { + struct gguf_context * ctx_gguf = model_loader->ctx_gguf; + // load from base model - if (model_loader->tensors_map.name_to_idx.find(base_name) == model_loader->tensors_map.name_to_idx.end()) { + if (gguf_find_tensor(ctx_gguf, base_name.c_str()) < 0) { LLAMA_LOG_ERROR("%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str()); return 1; } - size_t idx = model_loader->tensors_map.name_to_idx[base_name]; - llama_load_tensor & lt = model_loader->tensors_map.tensors[idx]; - base_t = model_loader->get_tensor(base_name, { (uint32_t)dest_t->ne[0], (uint32_t)dest_t->ne[1] }, GGML_BACKEND_CPU); - lt.data = (uint8_t *) lt.ggml_tensor->data; - model_loader->load_data_for(lt); - lt.ggml_tensor->data = lt.data; - } - else { + + // TODO: not tested!! maybe not working!
+ base_t = model_loader->create_tensor(base_ctx, base_name, { (uint32_t)dest_t->ne[0], (uint32_t)dest_t->ne[1] }, GGML_BACKEND_CPU); + model_loader->load_data_for(base_t); + } else { base_t = dest_t; } @@ -3803,6 +4152,341 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const return 0; } +// +// interface implementation +// + +struct llama_context_params llama_context_default_params() { + struct llama_context_params result = { + /*.seed =*/ LLAMA_DEFAULT_SEED, + /*.n_ctx =*/ 512, + /*.n_batch =*/ 512, + /*.gpu_layers =*/ 0, + /*.main_gpu =*/ 0, + /*.tensor_split =*/ nullptr, + /*.rope_freq_base =*/ 10000.0f, + /*.rope_freq_scale =*/ 1.0f, + /*.progress_callback =*/ nullptr, + /*.progress_callback_user_data =*/ nullptr, + /*.low_vram =*/ false, + /*.mul_mat_q =*/ false, + /*.f16_kv =*/ true, + /*.logits_all =*/ false, + /*.vocab_only =*/ false, + /*.use_mmap =*/ true, + /*.use_mlock =*/ false, + /*.embedding =*/ false, + }; + + return result; +} + +struct llama_model_quantize_params llama_model_quantize_default_params() { + struct llama_model_quantize_params result = { + /*.nthread =*/ 0, + /*.ftype =*/ LLAMA_FTYPE_MOSTLY_Q5_1, + /*.allow_requantize =*/ false, + /*.quantize_output_tensor =*/ true, + }; + + return result; +} + +int llama_max_devices(void) { + return LLAMA_MAX_DEVICES; +} + +bool llama_mmap_supported(void) { + return llama_mmap::SUPPORTED; +} + +bool llama_mlock_supported(void) { + return llama_mlock::SUPPORTED; +} + +void llama_backend_init(bool numa) { + ggml_time_init(); + + // needed to initialize f16 tables + { + struct ggml_init_params params = { 0, NULL, false }; + struct ggml_context * ctx = ggml_init(params); + ggml_free(ctx); + } + + if (numa) { + ggml_numa_init(); + } + +#ifdef GGML_USE_MPI + ggml_mpi_backend_init(); +#endif +} + +void llama_backend_free(void) { +#ifdef GGML_USE_MPI + ggml_mpi_backend_free(); +#endif +} + +int64_t llama_time_us(void) { + return ggml_time_us(); +} + +struct llama_model * llama_load_model_from_file( + const char * path_model, + struct llama_context_params params) { + ggml_time_init(); + + llama_model * model = new llama_model; + + ggml_type memory_type = params.f16_kv ? 
GGML_TYPE_F16 : GGML_TYPE_F32; + + if (!llama_model_load(path_model, *model, model->vocab, params.n_ctx, params.n_batch, params.n_gpu_layers, + params.main_gpu, params.tensor_split, params.mul_mat_q, params.rope_freq_base, params.rope_freq_scale, + params.low_vram, memory_type, params.use_mmap, params.use_mlock, params.vocab_only, + params.progress_callback, params.progress_callback_user_data)) { + LLAMA_LOG_ERROR("%s: failed to load model\n", __func__); + delete model; + return nullptr; + } + + return model; +} + +void llama_free_model(struct llama_model * model) { + delete model; +} + +struct llama_context * llama_new_context_with_model( + struct llama_model * model, + struct llama_context_params params) { + + if (!model) { + return nullptr; + } + + llama_context * ctx = new llama_context(*model); + + if (params.seed == LLAMA_DEFAULT_SEED) { + params.seed = time(NULL); + } + + unsigned cur_percentage = 0; + if (params.progress_callback == NULL) { + params.progress_callback_user_data = &cur_percentage; + params.progress_callback = [](float progress, void * ctx) { + unsigned * cur_percentage_p = (unsigned *) ctx; + unsigned percentage = (unsigned) (100 * progress); + while (percentage > *cur_percentage_p) { + *cur_percentage_p = percentage; + LLAMA_LOG_INFO("."); + if (percentage >= 100) { + LLAMA_LOG_INFO("\n"); + } + } + }; + } + + ctx->rng = std::mt19937(params.seed); + ctx->logits_all = params.logits_all; + + ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32; + + // reserve memory for context buffers + if (!params.vocab_only) { + if (!llama_kv_cache_init(ctx->model.hparams, ctx->kv_self, memory_type, ctx->model.hparams.n_ctx, params.n_gpu_layers)) { + LLAMA_LOG_ERROR("%s: llama_kv_cache_init() failed for self-attention cache\n", __func__); + llama_free(ctx); + return nullptr; + } + + { + const size_t memory_size = ggml_nbytes(ctx->kv_self.k) + ggml_nbytes(ctx->kv_self.v); + LLAMA_LOG_INFO("%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0); + } + + const auto & hparams = ctx->model.hparams; + + // resized during inference + if (params.logits_all) { + ctx->logits.reserve(hparams.n_ctx*hparams.n_vocab); + } else { + ctx->logits.reserve(hparams.n_vocab); + } + + if (params.embedding){ + ctx->embedding.resize(hparams.n_embd); + } + +#ifdef LLAMA_USE_ALLOCATOR + { + static const size_t tensor_alignment = 32; + // the compute buffer is used to store the tensor and graph structs, while the allocator buffer is used for the tensor data + ctx->buf_compute.resize(ggml_tensor_overhead()*GGML_MAX_NODES + ggml_graph_overhead()); + + // create measure allocator + ctx->alloc = ggml_allocr_new_measure(tensor_alignment); + + // build worst-case graph + int n_tokens = std::min((int)hparams.n_ctx, params.n_batch); + int n_past = hparams.n_ctx - n_tokens; + llama_token token = llama_token_bos(ctx); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph + ggml_cgraph * gf = llama_build_graph(*ctx, &token, NULL, n_tokens, n_past); +#ifdef GGML_USE_METAL + if (params.n_gpu_layers > 0) { + ctx->ctx_metal = ggml_metal_init(1); + if (!ctx->ctx_metal) { + LLAMA_LOG_ERROR("%s: ggml_metal_init() failed\n", __func__); + llama_free(ctx); + return NULL; + } + ggml_metal_graph_find_concurrency(ctx->ctx_metal, gf, false); + ggml_allocr_set_parse_seq(ctx->alloc, ggml_metal_get_concur_list(ctx->ctx_metal), ggml_metal_if_optimized(ctx->ctx_metal)); + } +#endif + // measure memory requirements for the graph + size_t alloc_size = 
ggml_allocr_alloc_graph(ctx->alloc, gf) + tensor_alignment; + + LLAMA_LOG_INFO("%s: compute buffer total size = %7.2f MB\n", __func__, (ctx->buf_compute.size + alloc_size) / 1024.0 / 1024.0); + + // debug - for comparison with scratch buffer + //size_t prev_req = + // MEM_REQ_SCRATCH0(hparams.n_ctx).at(ctx->model.type) + + // MEM_REQ_SCRATCH1().at(ctx->model.type) + + // MEM_REQ_EVAL().at(ctx->model.type); + //LLAMA_LOG_INFO("%s: (debug) equivalent with scratch buffer = %7.2f MB\n", __func__, prev_req / 1024.0 / 1024.0); + + // recreate allocator with exact memory requirements + ggml_allocr_free(ctx->alloc); + + ctx->buf_alloc.resize(alloc_size); + ctx->alloc = ggml_allocr_new(ctx->buf_alloc.data, ctx->buf_alloc.size, tensor_alignment); +#ifdef GGML_USE_METAL + if (ctx->ctx_metal) { + ggml_allocr_set_parse_seq(ctx->alloc, ggml_metal_get_concur_list(ctx->ctx_metal), ggml_metal_if_optimized(ctx->ctx_metal)); + } +#endif + } +#else + ctx->buf_compute.resize(MEM_REQ_EVAL().at(ctx->model.type) + ggml_graph_overhead()); +#endif + +#ifdef LLAMA_USE_SCRATCH + ctx->buf_scratch[0].resize(MEM_REQ_SCRATCH0(hparams.n_ctx).at(ctx->model.type)); + ctx->buf_scratch[1].resize(MEM_REQ_SCRATCH1().at(ctx->model.type)); +#endif + } + +#ifdef GGML_USE_METAL + if (params.n_gpu_layers > 0) { + // this allocates all Metal resources and memory buffers + + void * data_ptr = NULL; + size_t data_size = 0; + + if (params.use_mmap) { + data_ptr = ctx->model.mapping->addr; + data_size = ctx->model.mapping->size; + } else { + data_ptr = ggml_get_mem_buffer(ctx->model.ctx); + data_size = ggml_get_mem_size (ctx->model.ctx); + } + + const size_t max_size = ggml_get_max_tensor_size(ctx->model.ctx); + + LLAMA_LOG_INFO("%s: max tensor size = %8.2f MB\n", __func__, max_size/1024.0/1024.0); + +#define LLAMA_METAL_CHECK_BUF(result) \ + if (!(result)) { \ + LLAMA_LOG_ERROR("%s: failed to add buffer\n", __func__); \ + llama_free(ctx); \ + return NULL; \ + } + + LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "data", data_ptr, data_size, max_size)); + + LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "eval", ctx->buf_compute.data, ctx->buf_compute.size, 0)); + LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "kv", ctx->kv_self.buf.data, ctx->kv_self.buf.size, 0)); + + LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "alloc", ctx->buf_alloc.data, ctx->buf_alloc.size, 0)); +#undef LLAMA_METAL_CHECK_BUF + } +#endif + +#ifdef GGML_USE_MPI + ctx->ctx_mpi = ggml_mpi_init(); + + if (ggml_mpi_rank(ctx->ctx_mpi) > 0) { + // Enter a blocking eval loop with dummy input, letting rank=0 drive the process + const std::vector<llama_token> tmp(ctx->model.hparams.n_ctx, llama_token_bos(ctx)); + while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {}; + llama_backend_free(); + exit(1); + } +#endif + + return ctx; +} + +struct llama_context * llama_init_from_file( + const char * path_model, + struct llama_context_params params) { + + struct llama_model * model = llama_load_model_from_file(path_model, params); + if (!model) { + return nullptr; + } + struct llama_context * ctx = llama_new_context_with_model(model, params); + ctx->model_owner = true; + return ctx; +} + +void llama_free(struct llama_context * ctx) { + delete ctx; +} + +int llama_n_vocab(const struct llama_context * ctx) { + return ctx->model.vocab.id_to_token.size(); +} + +int llama_n_ctx(const struct llama_context * ctx) { + return ctx->model.hparams.n_ctx; +} + +int llama_n_embd(const struct llama_context * ctx) { + return ctx->model.hparams.n_embd; +}
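// usage sketch (an editor's illustration, not part of the patch): the context-level
// getters above and the model-level getters below report the same hyperparameters
//
//     const int n_vocab = llama_n_vocab(ctx);          // via a llama_context
//     const int n_embd  = llama_model_n_embd(model);   // via the bare llama_model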
+ +int llama_model_n_vocab(const struct llama_model * model) { + return model->vocab.id_to_token.size(); +} + +int llama_model_n_ctx(const struct llama_model * model) { + return model->hparams.n_ctx; +} + +int llama_model_n_embd(const struct llama_model * model) { + return model->hparams.n_embd; +} + +int llama_model_type(const struct llama_model * model, char * buf, size_t buf_size) { + return snprintf(buf, buf_size, "LLaMA %s %s", llama_model_type_name(model->type), llama_model_ftype_name(model->ftype)); +} + +int llama_model_quantize( + const char * fname_inp, + const char * fname_out, + const llama_model_quantize_params * params) { + try { + llama_model_quantize_internal(fname_inp, fname_out, params); + return 0; + } catch (const std::exception & err) { + LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what()); + return 1; + } +} + int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lora, const char * path_base_model, int n_threads) { try { return llama_apply_lora_from_file_internal(ctx->model, path_lora, path_base_model, n_threads); @@ -3865,6 +4549,46 @@ size_t llama_get_state_size(const struct llama_context * ctx) { return s_total; } +// llama_context_data +struct llama_data_context { + virtual void write(const void * src, size_t size) = 0; + virtual size_t get_size_written() = 0; + virtual ~llama_data_context() = default; +}; + +struct llama_data_buffer_context : llama_data_context { + uint8_t * ptr; + size_t size_written = 0; + + llama_data_buffer_context(uint8_t * p) : ptr(p) {} + + void write(const void * src, size_t size) override { + memcpy(ptr, src, size); + ptr += size; + size_written += size; + } + + size_t get_size_written() override { + return size_written; + } +}; + +struct llama_data_file_context : llama_data_context { + llama_file * file; + size_t size_written = 0; + + llama_data_file_context(llama_file * f) : file(f) {} + + void write(const void * src, size_t size) override { + file->write_raw(src, size); + size_written += size; + } + + size_t get_size_written() override { + return size_written; + } +}; + /** copy state data into either a buffer or file depending on the passed in context * * file context: @@ -3998,7 +4722,7 @@ size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) { rng_ss.str(std::string(&rng_buf[0], rng_size)); rng_ss >> ctx->rng; - LLAMA_ASSERT(rng_ss.fail() == false); + GGML_ASSERT(rng_ss.fail() == false); } // set logits @@ -4009,7 +4733,7 @@ size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) { memcpy(&logits_cap, inp, sizeof(logits_cap)); inp += sizeof(logits_cap); memcpy(&logits_size, inp, sizeof(logits_size)); inp += sizeof(logits_size); - LLAMA_ASSERT(ctx->logits.capacity() == logits_cap); + GGML_ASSERT(ctx->logits.capacity() == logits_cap); if (logits_size) { ctx->logits.resize(logits_size); @@ -4025,7 +4749,7 @@ size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) { memcpy(&embedding_size, inp, sizeof(embedding_size)); inp += sizeof(embedding_size); - LLAMA_ASSERT(ctx->embedding.capacity() == embedding_size); + GGML_ASSERT(ctx->embedding.capacity() == embedding_size); if (embedding_size) { memcpy(ctx->embedding.data(), inp, embedding_size * sizeof(float)); @@ -4048,7 +4772,7 @@ size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) { memcpy(&kv_ntok, inp, sizeof(kv_ntok)); inp += sizeof(kv_ntok); if (kv_size) { - LLAMA_ASSERT(kv_self.buf.size == kv_size); + GGML_ASSERT(kv_self.buf.size == kv_size); const size_t elt_size = 
ggml_element_size(kv_self.k); @@ -4084,7 +4808,7 @@ size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) { const size_t nread = inp - src; const size_t max_size = llama_get_state_size(ctx); - LLAMA_ASSERT(nread <= max_size); + GGML_ASSERT(nread <= max_size); return nread; } @@ -4192,7 +4916,6 @@ int llama_eval( return 0; } - int llama_eval_embd( struct llama_context * ctx, const float * embd, @@ -4218,7 +4941,7 @@ int llama_eval_export(struct llama_context * ctx, const char * fname) { const int n_batch = 1; const int n_ctx = 512 - n_batch; - const std::vector<llama_token> tmp(n_batch, llama_token_bos()); + const std::vector<llama_token> tmp(n_batch, llama_token_bos(ctx)); if (!llama_eval_internal(*ctx, tmp.data(), nullptr, tmp.size(), n_ctx, 1, fname)) { LLAMA_LOG_ERROR("%s: failed to eval\n", __func__); @@ -4228,13 +4951,54 @@ int llama_eval_export(struct llama_context * ctx, const char * fname) { return 0; } -int llama_tokenize_with_model( - const struct llama_model * model, +float * llama_get_logits(struct llama_context * ctx) { + return ctx->logits.data(); +} + +float * llama_get_embeddings(struct llama_context * ctx) { + return ctx->embedding.data(); +} + +const char * llama_token_get_text(const struct llama_context * ctx, llama_token token) { + return ctx->model.vocab.id_to_token[token].text.c_str(); +} + +float llama_token_get_score(const struct llama_context * ctx, llama_token token) { + return ctx->model.vocab.id_to_token[token].score; +} + +llama_token_type llama_token_get_type(const struct llama_context * ctx, llama_token token) { + return ctx->model.vocab.id_to_token[token].type; +} + +llama_token llama_token_bos(const struct llama_context * ctx) { + return ctx->model.vocab.special_bos_id; +} + +llama_token llama_token_eos(const struct llama_context * ctx) { + return ctx->model.vocab.special_eos_id; +} + +llama_token llama_token_nl(const struct llama_context * ctx) { + return ctx->model.vocab.linefeed_id; +} + +int llama_tokenize( + struct llama_context * ctx, const char * text, llama_token * tokens, int n_max_tokens, bool add_bos) { - auto res = llama_tokenize(model->vocab, text, add_bos); + return llama_tokenize_with_model(&ctx->model, text, tokens, n_max_tokens, add_bos); +} + +int llama_tokenize_bpe( + struct llama_context * ctx, + const char * text, + llama_token * tokens, + int n_max_tokens, + bool add_bos) { + auto res = llama_tokenize_internal(ctx->model.vocab, text, add_bos, false); if (n_max_tokens < (int) res.size()) { LLAMA_LOG_ERROR("%s: too many tokens\n", __func__); @@ -4248,94 +5012,75 @@ int llama_tokenize_with_model( return res.size(); } -int llama_tokenize( - struct llama_context * ctx, +int llama_tokenize_with_model( + const struct llama_model * model, const char * text, llama_token * tokens, int n_max_tokens, bool add_bos) { - return llama_tokenize_with_model(&ctx->model, text, tokens, n_max_tokens, add_bos); -} + auto escape = llama_vocab_get_type(model->vocab) == LLAMA_VOCAB_TYPE_SPM; + auto res = llama_tokenize_internal(model->vocab, text, add_bos, escape); -int llama_n_vocab_from_model(const struct llama_model * model) { - return model->vocab.id_to_token.size(); -} - -int llama_n_ctx_from_model(const struct llama_model * model) { - return model->hparams.n_ctx; -} - -int llama_n_embd_from_model(const struct llama_model * model) { - return model->hparams.n_embd; -} - -int llama_n_vocab(const struct llama_context * ctx) { - return ctx->model.vocab.id_to_token.size(); -} - -int llama_n_ctx(const struct llama_context * ctx) { - return ctx->model.hparams.n_ctx; -} -
-int llama_n_embd(const struct llama_context * ctx) { - return ctx->model.hparams.n_embd; -} - -int llama_model_type(const struct llama_model * model, char * buf, size_t buf_size) { - return snprintf(buf, buf_size, "LLaMA %s %s", llama_model_type_name(model->type), llama_ftype_name(model->hparams.ftype)); -} - -int llama_get_vocab_from_model( - const struct llama_model * model, - const char * * strings, - float * scores, - int capacity) { - int n = std::min(capacity, (int) model->vocab.id_to_token.size()); - for (int i = 0; i<n; i++) { - strings[i] = model->vocab.id_to_token[i].tok.c_str(); - scores[i] = model->vocab.id_to_token[i].score; - } - return n; -} - -int llama_get_vocab( - const struct llama_context * ctx, - const char * * strings, - float * scores, - int capacity) { - return llama_get_vocab_from_model(&ctx->model, strings, scores, capacity); -} - -float * llama_get_logits(struct llama_context * ctx) { - return ctx->logits.data(); -} - -float * llama_get_embeddings(struct llama_context * ctx) { - return ctx->embedding.data(); -} - -const char * llama_token_to_str_with_model(const struct llama_model * model, llama_token token) { - if (token >= llama_n_vocab_from_model(model)) { - return nullptr; + if (n_max_tokens < (int) res.size()) { + LLAMA_LOG_ERROR("%s: too many tokens\n", __func__); + return -((int) res.size()); } - return model->vocab.id_to_token[token].tok.c_str(); + for (size_t i = 0; i < res.size(); i++) { + tokens[i] = res[i]; + } + + return res.size(); } -const char * llama_token_to_str(const struct llama_context * ctx, llama_token token) { - return llama_token_to_str_with_model(&ctx->model, token); +int llama_token_to_str(const struct llama_context * ctx, llama_token token, char * buf, int length) { + return llama_token_to_str_with_model(&ctx->model, token, buf, length); } -llama_token llama_token_bos() { - return 1; +int llama_token_to_str_bpe(const struct llama_context * ctx, llama_token token, char * buf, int length) { + if (0 <= token && token < llama_model_n_vocab(&ctx->model)) { + std::string result = ctx->model.vocab.id_to_token[token].text; + if (length < (int) result.length()) { + return -result.length(); + } + memcpy(buf, result.c_str(), result.length()); + return result.length(); + } + return 0; } -llama_token llama_token_eos() { - return 2; -} - -llama_token llama_token_nl() { - return 13; +// does not write null-terminator to str +int llama_token_to_str_with_model(const struct llama_model * model, llama_token token, char * buf, int length) { + if (0 <= token && token < llama_model_n_vocab(model)) { + if (llama_is_normal_token(model->vocab, token)) { + std::string result = model->vocab.id_to_token[token].text; + if (llama_vocab_get_type(model->vocab) == LLAMA_VOCAB_TYPE_SPM) { + result = llama_unescape_whitespace(result); + } + if (length < (int) result.length()) { + return -result.length(); + } + memcpy(buf, result.c_str(), result.length()); + return result.length(); + } else if (llama_is_unknown_token(model->vocab, token)) { // NOLINT + if (length < 3) { + return -3; + } + buf[0] = '\xe2'; + buf[1] = '\x96'; + buf[2] = '\x85'; + return 3; + } else if (llama_is_control_token(model->vocab, token)) { + ; + } else if (llama_is_byte_token(model->vocab, token)) { + if (length < 1) { + return -1; + } + buf[0] = llama_token_to_byte(model->vocab, token); + return 1; + } + } + return 0; } struct llama_timings llama_get_timings(struct llama_context * ctx) { @@ -4403,7 +5148,6 @@ const std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_ return ctx->model.tensors_by_name; } - void llama_log_set(llama_log_callback
log_callback, void * user_data) { g_state.log_callback = log_callback ? log_callback : llama_log_callback_default; g_state.log_callback_user_data = user_data; diff --git a/llama.h b/llama.h index 9d732f914..aa5b7d69c 100644 --- a/llama.h +++ b/llama.h @@ -34,29 +34,18 @@ # define DEPRECATED(func, hint) func #endif -#define LLAMA_FILE_MAGIC_GGJT 0x67676a74u // 'ggjt' -#define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla' -#define LLAMA_FILE_MAGIC_GGMF 0x67676d66u // 'ggmf' -#define LLAMA_FILE_MAGIC_GGML 0x67676d6cu // 'ggml' -#define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn' +#define LLAMA_DEFAULT_SEED 0xFFFFFFFF -#define LLAMA_FILE_VERSION 3 -#define LLAMA_FILE_MAGIC LLAMA_FILE_MAGIC_GGJT -#define LLAMA_FILE_MAGIC_UNVERSIONED LLAMA_FILE_MAGIC_GGML -#define LLAMA_SESSION_MAGIC LLAMA_FILE_MAGIC_GGSN -#define LLAMA_SESSION_VERSION 1 +#define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn' -#define LLAMA_DEFAULT_SEED 0xFFFFFFFF +#define LLAMA_SESSION_MAGIC LLAMA_FILE_MAGIC_GGSN +#define LLAMA_SESSION_VERSION 1 #if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST) || defined(GGML_USE_METAL) // Defined when llama.cpp is compiled with support for offloading model layers to GPU. #define LLAMA_SUPPORTS_GPU_OFFLOAD #endif -#ifndef LLAMA_DEFAULT_RMS_EPS -#define LLAMA_DEFAULT_RMS_EPS 5e-6f -#endif - #ifdef __cplusplus extern "C" { #endif @@ -72,6 +61,50 @@ extern "C" { typedef int llama_token; + enum llama_log_level { + LLAMA_LOG_LEVEL_ERROR = 2, + LLAMA_LOG_LEVEL_WARN = 3, + LLAMA_LOG_LEVEL_INFO = 4 + }; + + enum llama_vocab_type { + LLAMA_VOCAB_TYPE_SPM = 0, // SentencePiece + LLAMA_VOCAB_TYPE_BPE = 1, // Byte Pair Encoding + }; + + enum llama_token_type { + LLAMA_TOKEN_TYPE_UNDEFINED = 0, + LLAMA_TOKEN_TYPE_NORMAL = 1, + LLAMA_TOKEN_TYPE_UNKNOWN = 2, + LLAMA_TOKEN_TYPE_CONTROL = 3, + LLAMA_TOKEN_TYPE_USER_DEFINED = 4, + LLAMA_TOKEN_TYPE_UNUSED = 5, + LLAMA_TOKEN_TYPE_BYTE = 6, + }; + + // model file types + enum llama_ftype { + LLAMA_FTYPE_ALL_F32 = 0, + LLAMA_FTYPE_MOSTLY_F16 = 1, // except 1d tensors + LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors + LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors + LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16 + // LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // support has been removed + // LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // support has been removed + LLAMA_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors + LLAMA_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors + LLAMA_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors + LLAMA_FTYPE_MOSTLY_Q2_K = 10,// except 1d tensors + LLAMA_FTYPE_MOSTLY_Q3_K_S = 11,// except 1d tensors + LLAMA_FTYPE_MOSTLY_Q3_K_M = 12,// except 1d tensors + LLAMA_FTYPE_MOSTLY_Q3_K_L = 13,// except 1d tensors + LLAMA_FTYPE_MOSTLY_Q4_K_S = 14,// except 1d tensors + LLAMA_FTYPE_MOSTLY_Q4_K_M = 15,// except 1d tensors + LLAMA_FTYPE_MOSTLY_Q5_K_S = 16,// except 1d tensors + LLAMA_FTYPE_MOSTLY_Q5_K_M = 17,// except 1d tensors + LLAMA_FTYPE_MOSTLY_Q6_K = 18,// except 1d tensors + }; + typedef struct llama_token_data { llama_token id; // token id float logit; // log-odds of the token @@ -86,25 +119,10 @@ extern "C" { typedef void (*llama_progress_callback)(float progress, void *ctx); - enum llama_log_level { - LLAMA_LOG_LEVEL_ERROR = 2, - LLAMA_LOG_LEVEL_WARN = 3, - LLAMA_LOG_LEVEL_INFO = 4 - }; - - // Signature for logging events - // Note that text includes the new line character at the end for most events. - // If your logging mechanism cannot handle that, check if the last character is '\n' and strip it - // if it exists. 
- // It might not exist for progress report where '.' is output repeatedly. - typedef void (*llama_log_callback)(enum llama_log_level level, const char * text, void * user_data); - struct llama_context_params { uint32_t seed; // RNG seed, -1 for random int32_t n_ctx; // text context int32_t n_batch; // prompt processing batch size - int32_t n_gqa; // grouped-query attention (TEMP - will be moved to model hparams) - float rms_norm_eps; // rms norm epsilon (TEMP - will be moved to model hparams) int32_t n_gpu_layers; // number of layers to store in VRAM int32_t main_gpu; // the GPU that is used for scratch and small tensors @@ -129,33 +147,18 @@ extern "C" { bool use_mlock; // force system to keep model in RAM bool embedding; // embedding mode only }; - // model file types - enum llama_ftype { - LLAMA_FTYPE_ALL_F32 = 0, - LLAMA_FTYPE_MOSTLY_F16 = 1, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16 - // LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // support has been removed - // LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // support has been removed - LLAMA_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors - LLAMA_FTYPE_MOSTLY_Q2_K = 10,// except 1d tensors - LLAMA_FTYPE_MOSTLY_Q3_K_S = 11,// except 1d tensors - LLAMA_FTYPE_MOSTLY_Q3_K_M = 12,// except 1d tensors - LLAMA_FTYPE_MOSTLY_Q3_K_L = 13,// except 1d tensors - LLAMA_FTYPE_MOSTLY_Q4_K_S = 14,// except 1d tensors - LLAMA_FTYPE_MOSTLY_Q4_K_M = 15,// except 1d tensors - LLAMA_FTYPE_MOSTLY_Q5_K_S = 16,// except 1d tensors - LLAMA_FTYPE_MOSTLY_Q5_K_M = 17,// except 1d tensors - LLAMA_FTYPE_MOSTLY_Q6_K = 18,// except 1d tensors - }; + + // Signature for logging events + // Note that text includes the new line character at the end for most events. + // If your logging mechanism cannot handle that, check if the last character is '\n' and strip it + // if it exists. + // It might not exist for progress report where '.' is output repeatedly. + typedef void (*llama_log_callback)(enum llama_log_level level, const char * text, void * user_data); // model quantization parameters typedef struct llama_model_quantize_params { int nthread; // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency() - enum llama_ftype ftype; // quantize to this llama_ftype + enum llama_ftype ftype; // quantize to this llama_ftype bool allow_requantize; // allow quantizing non-f32/f16 tensors bool quantize_output_tensor; // quantize output.weight } llama_model_quantize_params; @@ -208,27 +211,16 @@ extern "C" { int32_t n_eval; }; - // Set callback for all future logging events. - // If this is not called, or NULL is supplied, everything is output on stderr. 
- LLAMA_API void llama_log_set(llama_log_callback log_callback, void * user_data); + LLAMA_API struct llama_context_params llama_context_default_params(void); + LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void); - LLAMA_API int llama_max_devices(); - - LLAMA_API struct llama_context_params llama_context_default_params(); - LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(); - - LLAMA_API bool llama_mmap_supported(); - LLAMA_API bool llama_mlock_supported(); - - // TODO: not great API - very likely to change // Initialize the llama + ggml backend // If numa is true, use NUMA optimizations // Call once at the start of the program LLAMA_API void llama_backend_init(bool numa); - // Call once at the end of the program - currently only used for MPI - LLAMA_API void llama_backend_free(); - LLAMA_API int64_t llama_time_us(); + // Call once at the end of the program - currently only used for MPI + LLAMA_API void llama_backend_free(void); LLAMA_API struct llama_model * llama_load_model_from_file( const char * path_model, @@ -240,17 +232,26 @@ extern "C" { struct llama_model * model, struct llama_context_params params); - // Various functions for loading a ggml llama model. - // Allocate (almost) all memory needed for the model. - // Return NULL on failure - LLAMA_API DEPRECATED(struct llama_context * llama_init_from_file( - const char * path_model, - struct llama_context_params params), - "please use llama_load_model_from_file combined with llama_new_context_with_model instead"); - // Frees all allocated memory LLAMA_API void llama_free(struct llama_context * ctx); + LLAMA_API int64_t llama_time_us(void); + + LLAMA_API int llama_max_devices (void); + LLAMA_API bool llama_mmap_supported (void); + LLAMA_API bool llama_mlock_supported(void); + + LLAMA_API int llama_n_vocab(const struct llama_context * ctx); + LLAMA_API int llama_n_ctx (const struct llama_context * ctx); + LLAMA_API int llama_n_embd (const struct llama_context * ctx); + + LLAMA_API int llama_model_n_vocab(const struct llama_model * model); + LLAMA_API int llama_model_n_ctx (const struct llama_model * model); + LLAMA_API int llama_model_n_embd (const struct llama_model * model); + + // Get a string describing the model type + LLAMA_API int llama_model_type(const struct llama_model * model, char * buf, size_t buf_size); + // Returns 0 on success LLAMA_API int llama_model_quantize( const char * fname_inp, @@ -272,9 +273,9 @@ extern "C" { LLAMA_API int llama_model_apply_lora_from_file( const struct llama_model * model, - const char * path_lora, - const char * path_base_model, - int n_threads); + const char * path_lora, + const char * path_base_model, + int n_threads); // Returns the number of tokens in the KV cache LLAMA_API int llama_get_kv_cache_token_count(const struct llama_context * ctx); @@ -324,11 +325,40 @@ extern "C" { // IMPORTANT: do not use for anything else other than debugging and testing! 
@@ -324,11 +325,40 @@ extern "C" {
     // IMPORTANT: do not use for anything else other than debugging and testing!
     LLAMA_API int llama_eval_export(struct llama_context * ctx, const char * fname);
 
+    // Token logits obtained from the last call to llama_eval()
+    // The logits for the last token are stored in the last row
+    // Can be mutated in order to change the probabilities of the next token
+    // Rows: n_tokens
+    // Cols: n_vocab
+    LLAMA_API float * llama_get_logits(struct llama_context * ctx);
+
+    // Get the embeddings for the input
+    // shape: [n_embd] (1-dimensional)
+    LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);
+
+    //
+    // Vocab
+    //
+
+    LLAMA_API const char * llama_token_get_text(const struct llama_context * ctx, llama_token token);
+
+    LLAMA_API float llama_token_get_score(const struct llama_context * ctx, llama_token token);
+
+    LLAMA_API llama_token_type llama_token_get_type(const struct llama_context * ctx, llama_token token);
+
+    // Special tokens
+    LLAMA_API llama_token llama_token_bos(const struct llama_context * ctx);  // beginning-of-sentence
+    LLAMA_API llama_token llama_token_eos(const struct llama_context * ctx);  // end-of-sentence
+    LLAMA_API llama_token llama_token_nl (const struct llama_context * ctx);  // next-line
+
+    //
+    // Tokenization
+    //
+
     // Convert the provided text into tokens.
     // The tokens pointer must be large enough to hold the resulting tokens.
     // Returns the number of tokens on success, no more than n_max_tokens
     // Returns a negative number on failure - the number of tokens that would have been returned
-    // TODO: not sure if correct
     LLAMA_API int llama_tokenize(
             struct llama_context * ctx,
                       const char * text,
@@ -336,6 +366,13 @@ extern "C" {
                              int   n_max_tokens,
                             bool   add_bos);
 
+    LLAMA_API int llama_tokenize_bpe(
+            struct llama_context * ctx,
+                      const char * text,
+                     llama_token * tokens,
+                             int   n_max_tokens,
+                            bool   add_bos);
+
     LLAMA_API int llama_tokenize_with_model(
         const struct llama_model * model,
                       const char * text,
@@ -343,57 +380,30 @@ extern "C" {
                              int   n_max_tokens,
                             bool   add_bos);
 
-    LLAMA_API int llama_n_vocab(const struct llama_context * ctx);
-    LLAMA_API int llama_n_ctx  (const struct llama_context * ctx);
-    LLAMA_API int llama_n_embd (const struct llama_context * ctx);
-
-    LLAMA_API int llama_n_vocab_from_model(const struct llama_model * model);
-    LLAMA_API int llama_n_ctx_from_model  (const struct llama_model * model);
-    LLAMA_API int llama_n_embd_from_model (const struct llama_model * model);
-
-    LLAMA_API int llama_model_type(const struct llama_model * model, char * buf, size_t buf_size);
-
-    // Get the vocabulary as output parameters.
-    // Returns number of results.
-    LLAMA_API int llama_get_vocab(
-            const struct llama_context * ctx,
-                          const char * * strings,
-                                 float * scores,
-                                   int   capacity);
-
-    LLAMA_API int llama_get_vocab_from_model(
-              const struct llama_model * model,
-                          const char * * strings,
-                                 float * scores,
-                                   int   capacity);
-
-    // Token logits obtained from the last call to llama_eval()
-    // The logits for the last token are stored in the last row
-    // Can be mutated in order to change the probabilities of the next token
-    // Rows: n_tokens
-    // Cols: n_vocab
-    LLAMA_API float * llama_get_logits(struct llama_context * ctx);
-
-    // Get the embeddings for the input
-    // shape: [n_embd] (1-dimensional)
-    LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);
-
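The comment block on llama_tokenize above defines a two-pass pattern: when the buffer is too small the call fails and returns the negated token count, which gives the exact size for the retry. A sketch:

    // Two-pass tokenization sketch: a negative return from llama_tokenize is
    // the negated number of tokens that would have been produced.
    #include <vector>

    #include "llama.h"

    static std::vector<llama_token> tokenize(struct llama_context * ctx, const char * text, bool add_bos) {
        std::vector<llama_token> tokens(32); // initial guess, grown on demand
        int n = llama_tokenize(ctx, text, tokens.data(), (int) tokens.size(), add_bos);
        if (n < 0) {
            tokens.resize(-n); // exact size reported by the failed call
            n = llama_tokenize(ctx, text, tokens.data(), (int) tokens.size(), add_bos);
        }
        tokens.resize(n); // n <= n_max_tokens on success
        return tokens;
    }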
     // Token Id -> String. Uses the vocabulary in the provided context
-    LLAMA_API const char * llama_token_to_str(
+    // Does not write null terminator to the buffer
+    LLAMA_API int llama_token_to_str(
             const struct llama_context * ctx,
-                           llama_token   token);
+                           llama_token   token,
+                                  char * buf,
+                                  int    length);
+
+    LLAMA_API int llama_token_to_str_bpe(
+            const struct llama_context * ctx,
+                           llama_token   token,
+                                  char * buf,
+                                  int    length);
 
-    LLAMA_API const char * llama_token_to_str_with_model(
+    LLAMA_API int llama_token_to_str_with_model(
               const struct llama_model * model,
-                           llama_token   token);
-
-    // Special tokens
-    LLAMA_API llama_token llama_token_bos();  // beginning-of-sentence
-    LLAMA_API llama_token llama_token_eos();  // end-of-sentence
-    LLAMA_API llama_token llama_token_nl();   // next-line
+                           llama_token   token,
+                                  char * buf,
+                                  int    length);
 
+    //
     // Grammar
     //
+
     LLAMA_API struct llama_grammar * llama_grammar_init(
             const llama_grammar_element ** rules,
                                  size_t    n_rules,
@@ -401,7 +411,9 @@ extern "C" {
 
     LLAMA_API void llama_grammar_free(struct llama_grammar * grammar);
 
+    //
     // Sampling functions
+    //
 
     /// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
     LLAMA_API void llama_sample_repetition_penalty(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float penalty);
@@ -470,6 +482,10 @@ extern "C" {
     // Print system information
     LLAMA_API const char * llama_print_system_info(void);
 
+    // Set callback for all future logging events.
+    // If this is not called, or NULL is supplied, everything is output on stderr.
+    LLAMA_API void llama_log_set(llama_log_callback log_callback, void * user_data);
+
 #ifdef __cplusplus
 }
 #endif
@@ -479,10 +495,11 @@ extern "C" {
 #include <string>
 #include <vector>
+
 struct ggml_tensor;
 
 const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(struct llama_context * ctx);
 
-#endif
+#endif // LLAMA_API_INTERNAL
 
 #endif // LLAMA_H
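One usage note on the detokenization change above: llama_token_to_str now fills a caller-provided buffer and returns the number of bytes written instead of returning an internal string, and it does not write a null terminator, so the returned length must be used when building a string. A sketch (the negative-return failure convention is an assumption; the patch does not spell it out):

    // Sketch: converting one token id to text with the new buffer-based API.
    #include <string>

    #include "llama.h"

    static std::string token_to_piece(const struct llama_context * ctx, llama_token token) {
        char buf[64]; // assumed large enough for a single token's text
        const int n = llama_token_to_str(ctx, token, buf, (int) sizeof(buf));
        if (n < 0) {
            return std::string(); // assumed failure convention (buffer too small)
        }
        return std::string(buf, n); // use the returned length: no '\0' was written
    }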
diff --git a/models/.editorconfig b/models/.editorconfig
new file mode 100644
index 000000000..78b36ca08
--- /dev/null
+++ b/models/.editorconfig
@@ -0,0 +1 @@
+root = true
diff --git a/models/ggml-vocab-llama.gguf b/models/ggml-vocab-llama.gguf
new file mode 100644
index 0000000000000000000000000000000000000000..63bfaf672f382c0f5bbcffe54736e2698ef3ac55
Binary files /dev/null and b/models/ggml-vocab-llama.gguf differ
z_W5}@w%SyqQRGi|%8^;q59fw))X7Jp-*Bz~b@I|vWYH=q^NgaJVb6tAB4Td8)#}qi zs$dDRy2hj0(T6$2QN(WVf=jEBsxME))wa?PLh=zi1}p2=qVJC@G?eXURf(h^V$GWw zZ5>r56-E?x-Ip;MN!xZP#IQ~wAI(!m#ko@VoxW+gi>3Q*=Dm{C;aLzV6tS*Ne(9)} zObV_)38eJqUi4zxBFXfyylQXq&sqTvlk_ie{zUCWFB=9y?ybW%l|aqPQMAH8;A8-j zZDy9gPPejKInFFPrYOOZco~MM-mBX5S{B}4vl3>or7U08@A?(t;Cf$-_w}blnvL$4 z^b(i@uZ`bKidc!(pyCw?MQTg2VjCPtc?gNMH~}p0R3ZQ`a(J$n?A}o5QR*N_r%-7=HLao;-(WqW<<$iJ<+PH8tDJ^67hu|&pg?XW5Y)^Qraj2-+M z=8d-8%QrrG(~no?OHH0pmToC-i)I@}@28&qGb`fFKKZeOC_XKcD=q)r&nT!<|1s+7 zJo4n#o)c#EUt97CmcLlOl71G4c@|ODs{qiIgqPT!dKI-JO6F4|bN6A~a0Us1Q?`Pd zXn~4IKLLSD@1-gVHnFBQ6qtv$?S8GTxC_@LcLMC#@InD!ijBXQ zzN!VPf$!ATIZaNKtq5PjR807K`61+EF@gh9;bumKBM>w}k z@>p-@vcdbR#>bj!^iMu2HJYH)_LCoY9_nP0#rsrMhz6zFgNz-^FSUi}`VHLCVyJx- z8J4IVq1#EJp)M7;gxW}|7UNl095Gb5$=l%$ZOLh@&N~)JJCk>bITWaRXkLKkgO+$( zL=th)G(a^suKY#~_-Vy4gbxQ{Ix+SJnQK>CG+(ip=EOgGX1n_6W)TA-4{ob0d^uCA zyg=4BXX4+@!Q2)m~# zdGW8+1#v1!MMVM1oss*VIp90aV`OhkxXMqO0jBMbBC^+Op`1Nr>6da1hYuwfqAp2f z_erJpatL^hEfWz$)^+JiW)t9?HHl*d=Y4Kvm@nhNrBJRgnqD?4l6U~y1*0igPVMri z&SoB}gjl{GT&RSNe?fN(Q4J)kr!{I4Px4P2=&$~$1R?!;Zkrc-hybuwxlwNJcsq)`cnPA`7!G2E<)@TR7Zru3IMqr%5J}s zh%e>Jq(&o$hjA?VNsCwXkUwcn1(Us|rk{H8cqcXsQw%*ya=smTZm?_BvT(kpUU?@Y zKua~z37t97gY^9Xw(Z&N^aN$~y0=GXxwF?wJpo>|+Gv>pH;RP)_$LwTXi=XZlfl-i9PZBpZk#V@p6rl<+B{ zp&ecotdkliR6E2x*f_M5`;n0yaJFHi0Vl&ZrBN%w=Ox%c948Qh6c6@IseFpwvy)`v zz{LS2;eVUGUYgnYc1-RXf>=PD6bvg|f#cbi;D8)vcZYQ(LdCkR(5ZGOJgor?2jQ*w__YWa!>B6}&LGpDl@fQe z>+tN^a^v^C?rv64(;{c774KSBINiJ!39j%{!W|?ZB%MZ%)+5Gq<2-N;Wu#JRE7@3| zGXV#*b4NUDy&jOL?fK%}&M-K>%X+dBj?_Aeb!sq!3F#)1xoH{_TID>NuNsxDs0sBRQRD7hKMYqwD4iu0V*&Z-lW?P{57;x+PNc53&Yu^>UhIC{`!MC>3_R>#zWQ`*yORnU)Bf)rMVz&LP*dtq^D?Ao#%Q&cy+PHvxt>|< zISvUu^K7Sv<3MAgY+T{R@V4w!%#Ex+;mN#svz(Q|18a+G{%XFl+)6kG4eiA@Pdze0 z(Uvp#!r&EpPi_R$oPOWvAC%6jW`P@v1Z=q)LxQWW?1l&-2Ahoueuy)vL#9`*#j%t= zEt~Iw_%cHXVZd1xS^(w0&Rv1u!|l-D)gKA6q>hmpxVfni{i!^03|b8WG)HgC4k#0o z9_Sa{HT>Fuc2j^-#NXyWU$Y^_Bcx9{3A}<|F6Bh1H(q5ltK?l)%1PuQb$bwj6}W9Y z+$?^6&Zz29odM4*()qy{s$PMpKj+CdDex{T5`NJ?_v(7$_w!J}EK@O7<3|ZeW$EGv zC@~dGM>L{TWy32Ldd%m-m5zwiYN`WuF7j;-e$+FMY0%3Gc8z039{L;8A#$qwS{MSk zISwcW?culaIHv+6ts53a#`;IhuW4-^l}n1spCvmCaJpuJev=Be>_4r%&;f(Q@A3I z2#t&a>1i~{rM%F-0?Fx#>=%B}Faq>i#1rI5eQS@#KprFqK;UuP`_(&(d4p!h4$$1;Yh%Qp+@``SNp%;>7vDC-AP0Lv=3x{a3 zA?LqrA@l%^cyVwl#XNVL&^saoSg_itNHwuaU)2By=GwRpn5Qk^nr?){YImttsWO|( zeuuI%F}7P@{i^T63;Z>IY>X#A)r5P;yEyL3qre3Q^&{eDYV5QV`xrpw;(lpw1>!mU zK7a2$k^AZL``$75R#3q87`aMnwIU)2#dSW0;q9xH9|sBvmlDu?mBNbHmFOg<5TZmf zXlW+{z{~g%i9wr?EXg1R@$$IVTi~KJ0cQdAdaFfRw$eW|YxzmTsxH$@^Hn8=seW+z zJzcX+=rtRWxajPkFoA+VzL5Y)mpCBdg_w9?ee?+U@5LJx*@%36%xrP)GUEU}EJ%+5 zUzqPNC7#{Jmz$4pZ|ONvO~R7~HB|eL;cw4mPQ0zdvv9=*ClVtH$}MEg7yysm;U)>5 z>6w~`$=xb}Gr|vVP|qtcuwP6y`WWz~<%`mNr-l`WXWYecCLdqaJY{WaoJ_cALD5AK z9lldnAGW&Y5&~^t+L-26!ZNQ}7JwC_{UJ+~#TG*r`X$Di7uK zUTecM;()BUy9Q3Z5cq?wK_(Zj5dc#vn;CzlowazQd(bL3s|9*YoEsU9QC-*6Q#{=R^X?c z{cG%wk1pQRYWMcXsR1D4TmSxvTj5GK;>q>9o$d+Kym>*NQtRT z9!H(!o6kIBwrdX0Q$j1)7iBE;Q%S(rC-y8@YhRA&(t@3omM9umBYHb>eut_ep%p$= zq_U^mIFyf_sDQX-JSvWbf76MVLQy??s=5RzXJ-Aj2ZKG0o@!5ni2i|?$nB^$MFZd> zx-wMua8va5+ZAARK+w>#^?J_sK}jPcxIHh6ODsfuGs^#6X`Cn|vg~L*8x4wPjrt@n zUD^IRj@`Y5TK1l%!o2+d2BxiRXWT4NR+F1vkJ4TU@x;X1Wxn8WCl;9;d=!}VQ*yi zs2?{|3M9^=tE=>qza5XlV}vrA!&z^qvjvs2x&a6!M2$96hT7E$k)~cpZ4QM3I7;hX z@UmLL_s$>=_>EogZ5Abjpy{j?5|ZnX3f^K`5Yo#+sHrKUSDYIA0u_`f+W^D!#6L6z zWK~6Y&4?W_)oyzI)9R6zt5+AG9gIs#I-UlN#NHU>t@hX1p0~*qY!Q0`LoqQ|!t%?Z zkh1;oNp9DpVL+>3fx~Hz>`wO-nU~_DRA-3hgPS1kBi@EzHjU7R`rX!c7gwI3Qqv2DY6X*F9) z2hprC7I)$}-JQIB5GkHisE%V51MK}PcI6rUH%gfXIEdmwOQCzA{qi82uwm&X zE6*yMP;*j0mN!ajeQxc#dm=LM8Cq;Df 
z_U5c-NP7tUy`<@ShS5ciD2NTQ;$Ox7sUZ^p4v}_q{_*RfEWqnQK+Pg-e&C{F(a*gs z@{%)FT)wcQcYV#b0H{OoSl=oDSd2TEFs>)UJ0F9)XY|n&n^Nb8oopb*i+}ECt^m{WJ+0kzFn6h?77-EsLal#P zPJOzEc2dF*VS~^t&<~kMNfv3r5TQ*HVy2Rt$Z@A-Zou`3&KWw)@s(3Hl5y)MYD7)m z+>LT0P)pAnY@xeJl?VSp^b9J-ksGN%c!+DCzK{UvU}1CPn+T3aQ}9?g#(*;>|2Jmz zGb(S=i>PF6(!JVE>-i~^J5BcfV5k}&<|D498lbIQX(Nb;!j-YNHNsm77YFo-mpa4z zEAo0`TAAZW^DiF#()bFS3e+_J|Dk8hxz7GHwfpVebQx4hco4CqsD5?@P7R@^Bgu7I*V0td{4%53miipp4)&(W2E_l2@mn)Rm#%H2(>9eUCOeILH1qSQ z@`7(t7~F>8#EqD7L&p-1-vaEO6`9H@TKh;U>_ zCF(c~jtbJ8jlVHKkt{x#$>XiwPR7cFCX4M0`8`6+(o++?)!9X_(ED1Uw~TOf)sz;X z!|XfXbn%a+loPK)==gSQe>g*;va=Vca)I30xV{I^YaFA3@#r0kHE@J(6?b*gIG~C_ zIMeCyAgTySEzv*+k5WQ?^5JpfQ_(-vZYF9k-EPiqh*8-_(K1Gnay{6oHizT_X!Vz) zB~ok>Ey`VKgTXnaWXo^-fJpcmS|`%UlnPU= zrb{SuGmP);61N6aL-ABgf>ESKOikR{l^HdoEPhi(|HSrsVU*iI*-vy3YELbmI}60A z%W-?0f{xZM=w^qdjK*Pa3s1f%JHVk81~QRy*`U>mRnE&WeJLdOS0^`$krF&>6*9IN zlPX=_Tirxs$w=b=$&AoCJi7LD5``93Q7BPKnr~_;guv8D;Haf#T@1)ci^bpfS}hZH zHR>V7qzn4A6Dmk6Bn!FHX(Cg3p8P94<+v092o>Q?$)0M%1{}*2$5yIps3y|RJ&UK7 zBTAC0z+@TCT0_H(qPZ3nAe_e(ceFUcaalD`LbElDHB>67Zbj{^T!)h{?jx8dNg6VG zhtJ_(89vkuC=MtVJ;eh~mibP=;YyYJ zRXr#$>3W+Q6}o9Zc|Ushuo;hbeE?uk>AH$L1&@#-2Z@9_Eec^vhMC@&oAXh5G*)HB z(EsxN>V@&z#d{_%TWnf@pnb;VJuo@%@Zk9}lAn6jO>r`LH^*7e(DYG}C^U$O1Upo6 z>35RW4>rtt5!Uo#0R-rHOsRzX7iFPnI!YVO8#$m-uAJf+gg<<-^yOgn0>eUA_XJ8# zX8KW7KiAKqBz}#vD)(g@`IoD+t@;W)T(777LEmJ=>ZWQF=UENk-;pTv1Boft3JoXp z$t8H&XzVhlbVSn(D zR*lorUX>Rn%!Qc40wp4yMgzQ67Y_@r(%}8U_6EYbmsb z^mB^6%j8Sdj!mZRG2(Zbytkc|CIDI?bW73!8JZC}L2pQ}0G;QwZn(CQTm>}2Gs89s z*+5sdP&HZ`jSbClb{80TC9s5g9%&<5Kqi9*Mn^=#6&S?2e)HKs)8E@Z=6T04y%=Jj z%$7R6$#kl>A@GUVcsS1fb&-<#oj*0*?qq^D}8Lwc@DK#yOJ;WgvrBoz|9DYACt8u*QE- zs@6>2Iv`EOtWC8K0=iWv85MXh1lee3cVSMa9(t>9ctC}@eGE#C2jQi(g#c-Njib^$ zGLPz5eKPDW&8SortEZ33VChWJD)c~&dv;qm=E7ScEkxhJ&T1`_wYeIbc|Og~>sFjU zAw+l@z?eA8PP<^Jt&P;=yb!V;-TleC5Z5CKG%C1&qN|`lC7&zSRPq^CUhs{Bt$jSr z9%-vb&piJ?jSCEHZzvil=~Dsy`C?4T^O9w;SsSqyyhY*NL4TH*z;Pl|DNjo&y;|>R zy`3)dBq&=51#zwV z06olbxmurTNu2@^9pDg^a35rp9zi^Ld-78esxiCad$l!&;a3U`B2Q~-f!E|2+;F6v zdVg;Z2BB{b&nRK4M@{~F>c@Cc0JWqpj+4Kvu0jA>If@16kp>>HKLXP2F`%Oo@D%O% zp_4#*mHgBT&bvuBHB82xlqoI1e^2R1T{gSo9hYzTfu-lN7jzchCKEnUqUcN1u<>k0 zr|l?OO%M9o#DS8Ya;7IGwxp+;qmL*5qn-=6gG%Qkd1n3lCWm<{UmSLX_w*jr1KHQw zm?65uH3}CtTjtMo#1V&qIlQ^B6s}42A4>gob~ujK%D^4aLCnS1{@l}5b&)Cpn2Nt% zh&tYu3^Rl9Bk)?CO@8qPSgd~ZNfXPJE+DGr<`>lBNAp#@FB(`k#yh1&?Hy0v*1uFt z$WSP0&BZf^%IIx0O)Pn^*j)9x$lM%G&jOW*lac75DaT|SThSBPXeXrk1O)3rhVp}d z<3=O()fL67yMnr4TEK(|bB$DPSgNw<~BGqX8gl*N8@SL{>;aZ@i zGgg|uLmEt5tWfi`=F)5c1`P{mZB3Kwqsitl)+*HbKpB_%nTGeZbz!E>hC>#~m2u>S zb@9Z@*Q`keJgL|(z1?C{p!MoUhv5;%BJTlJnDsA#t8|FI^x{jo-4Jm82;an+fjX;I zPvLsN7H4c)H3WTe8+Zi$GnwD1>ebeyM9Wq2E0z1j@E{~=qqvu1E2GE7x$kwR(rOr{ zBFFZCaI|LI3AflYPigT(cqw}M2M*}(l_~^m(uA;`o@LG&8=ZIyYUTgSUF@%mE9w+C z*XX6@#I;JKs~ozCYVQHz&5^J68(#Xgf$NDDh^AiBo>p{mTva;T<&Vrba+imKNh!FZ z#&ZN_&1{i4GusG`9`~~L<3_|%{D?JS9W?;eRI8P6?}JtJs$ov1L4m}n2q;EQTBonU zTN)t`T7PkT#}@V!acHxo)s*E4s&@+z*P9ew0I-I*XZCoQ$%Kd#LAi7iv4_G_t&y&l};vI_D9@$%Sg*pJZWkR{9P2U`w!&2oE{B z8bXB3>)~=nuO z0ge6{siGdE+*Q<9^bufuJ~G35V!g4VPbC}L6CsDRR-M4jEF53D14uTJm7@XPkM4=8 zS)0_RkWX+jVu6y(6$^Ab++sWul{gE$qBnuDD$E{$L#1+P{+wW?*q>6kOY#C>Zs7nt z;FUp~+HfWw8V{I?Nkbqkgix5om>o4j9c__!R zSNP_oXT6wngG_fMH3YEwBQk|z625D{6aavV9Zng7dRCgXD7RMfX?31nJl00l=`W*D9JL05 zRR5BglUW!T1+Y4PGH+_tw~T90E>jOa8XS!S$^=R<0#0ygxT`yZ(+HfPSE!<-h#V+n zLj9AsQg{K?zEWOri$tIj5{&~VHX9su?S=+VkrgldG2T; zIs82pH&1DEKpCREf}|B!vV>YzLGT70Mx!4O6U&MyR^daQVHP3*bhAfZzle{FN(l%FfZW(Wu-9Qd5GBY?%r)o+!rBGJ47fL}5uA=oMluo*8Nt(t{ z?8oAR%e?QqHMbkRA~fa=>Q#6LuGU7#M4v=8-;Z4DDa|Z&(jCVo4eW(t8&L0>7o3Cz3$jFb`B3JFk|-6_dxOesLQQBrfOMLMfANw} 
zh1;B!HG8Q={%t0ocWVEIQTHJb0o@3;&d^)*tkN9`P6fTAQLzFw(z>t%D$GLS@ zzygO6`tb0ek_Mz;mW`xP+*m#dQ{%k7XC|M)cuJ!VpF(U&tgK{Hgq55UV1_S#7h?rq zlp4_?>T_C)3kRow%pX6@Ac&? z)!pp**Dj-B_c;R4vS^v)u*ttpep8*UsM&N1(%ncbXMT8wg1clFV3@5c=?xSX4T}80 zwBY9NYL0ufjvEq7a&PGjqggZuGyjE~RF*G9N``h+#zl^#3p{gRldqYaVv zL#-v{^M;{MNeoSGL%&BAL;YAJq7NIUrl16YyWAgPYc%O6!B53(__b0xH|j@ z*(sjr=6G8V8y6%9kTpRC{W2vLkt>j+_;yY9#QiL;D$yb^5_!oQ&~ne1SPR8eVov~Y z%rAG_Y+nll<2i;nnDUgvcZI5|2&!&e#`^TT7YM#5yDEd{vCc?;0ci^=J#ppR3V!&; zSrjYHv_PJ{-!A@%+kp^yR7e31c=M)O5QH~=N$7xSF^`olEx~ooihI51vwlzUnqQaV zHE5A|^EC8@>6~bbA;bEYm*4Xpl9vS5Qnso*SmMS6W5KC!@<-8wgFw*F&)9Vw8x_Sf zbg=JRy$;AWCt3LJ+Nx`65)0Q-db?DlWz>wNR&Nd;u!sN}=7!so$0A(mHWB&LoKK5$ zehr9RtH#!+;>sTG3<5f%jb-kvum zq?#>xbt0aho7|!)R7O&f!!JkeyJ@MlLH|7D?C>m=Yau=hi4k>gD|!Ixgp z@MhH*y7b+A^m5=JN>>h%1wRZyB#%wa5?CRV3u`;%lyVA4rtx$zFcIflA!Fco5epPq zPC->Y<)!f>wz}cV!c`io#b6(Qci@CKe;J?!#t>Uu49vp!D)X4OVsy5BVG z<3ftdmzx_A0HWff-ic8C{~3Fi=18yWT=N%r($Nt;>H%r$uu3M2b6t2kaR&=GBT-eN zSQME+CV&JInNVJU0(jUVk)qqOZb!C)vMjr$lIfOsPADvi6eaN@M>tv24q(uA&tIfw!>zi;og*Iw6mtz{f8(U(wr3b~s5k(DQDNKl3;9AZ4<%V0mes_vG? zz?mFKS;TsTX5dI5V6~OPK6*;dMmhW;!a5`8UkXqRy$#aiIf;s-aP(A6lgdiH|MSjA z36`iP@ufLGp>NR5X>Dzk8E$Z(P8N8OaZ#mQo;y(MYbX8l;&-FUZ~ zFlezNie7)22=y}M;viOWef`z}R~E@JaR zUgbv24|jTLmD6cg#H0LJV~za~I=O1{o!;aAf8&c@F!F1!CVB;?aqLjsvbMUhsH3B@ zjAA=B;1}%=YICm%d8_g+7lk#v!FdQ*fZ~C78vrw&TfyVu;xo!sT1Xmph zVsA>s?FE)==E2ecaT-kgwd?QuGn|1+a^f7QyXA7~v9d?6KJW_`7MuQXbFe=qmKASC z6FUD?DkqBoWLbsr)mcFA_2sUC9HxODPU7B>FHk^8y9ePv3b}^al%a;*iHZ%qboJFN zY4LT;6yGu0DJBexpejxx@Ag#^_CD+|KtO2|e)^r>^kJG9tjj-poG39fwt21FUkH;G0~6pC@)JIG=n#nlhDlA$T7O= z`7e4Q0CSW3nym*~iKHUzAJ>fAjV3cWcs7tP>zVE8yZZz&6^l;FP@a(6Qv^`kp)p75nDe<3Ewh|9TWS{ypZNh3o16-Z6};|X~5KVlL%aSh1{J-ISTAW_u}iXi@{Wp zr2iuIvsO-OSHJ3>6!^8q_g(!#Pm5r~zm*0_M`aouxSdl>61ric3U)FpI{7lf22Ur# zoHXPJW`Whc2wGefI~#DCH+c0AQWw5n$I}lkryo?;ism_~;ia{^)AzzUf2;$kq$e3T z?_|`O{@PmvBc};?`uoL6fNTVm^fJymC;)HgUfBir2uSIX7e!hIE$ydDBkDN^-RzPR zofFb{6b>NrNKAfzOXt85Re262^71Oc=J&VMDOQhMT3t4;jq=%$e!rLL9_;Cm?or1h zlcKU2@fv!fR1<1h(biH3OFs%f^R>@dFP_q!0er>uJ^dS({O!D&$q(ceh_ZUWg53ED zXJKfq6UeJ%iI6k2f@AOPjh$Fc3BeY{aYX!MOa zh?5&s*qzau=0_Qec-bX2wO3j|^Z{_CCKm7CL-T1%L=NBcJq4N<3R`l(?=H5>OfH~E zY8@S1A02rHyQNNmI9c#pKTwl&+Z}T?>z=u6Lb;_Cr*{2c!|IF_M!8D~~ zdAdPrv&!KX4_Z-svWohY3@NUr@83e1;yxbudSbK?#Vg3B`OP~UJH9h>#GW#otUx)R z3zJ`e4s0<_a5AZorR~JXi1L}|)O3I_3gwZ@AwB$q-JP26V`>sXwcFGzS5Ly_!Sg}we;zkqsxurLwcQ70r` z(W7@u!7mN!-Yp~G;Vl(0&G5GL;E-`B)P%k;xr{6^6VeEa`UG~0^1vhZY8JCQgsKlP z&N?ac#ws*peR>kJ@brC*31Ys^@DXjR^#Kv!Us`LJV#~}97P3fZ&2=4KG%3`{x=L34 z+WwSy6cp0ff`L`+4Wju=AMzGSc=P6K$vx!d{ooe;WvbzE_2cWeMUO{6%C;4%rYVUQ zuM)9c$JM=1`?eu9G#W>og+~*v#O?(7p4U-5Kgm!PlmsIUt6V~9a+wjM;;I|Z=>Zq~ zpAPTQ!||s-)-Tw4My82?R4&>bWs+G?Okz*&ISdNd!lATI0K(|+?9EbB2=B_U+j*{9 zC9#$?ymtOiUD-KvHE;;5yteP@g@OV6Z&s;w~ewK&v*Z=?j^ZC-j zuEI9KEe|6V^bE7vOzdL&FyyUt4?OB*UAtJ_8*VTeu8MAzxg>`tx^5KhfPH1}mf0$- zv{a7yn{+G0==49RX@{}JBllnSON2hrZqcV@oWOuOI)9uQA&foK$M+tH7nHLtQPJz~ zdM%2XMf?mAFRU7YcdZ8%0z>*Se>qW)ApfI8qP*slP9}4O_wX+_s@=U#*?#4^+tkhY z6TRtA{nJWq?0}rnz;9x+#0ll1N75y@ub#wbgT`yYs%pS$V4DxAsKR#CB?SE9wk{lWz$1Mp_`_$@BqUY$G|u2#2(C1I)&tr^XW=7dRvZi#l52zK>G1pXXcbTVDlB$P230S+Sr zDI#)Yf8(75XjHk;bSEG;mhJU-eJw#0rMGrgNoa5?{$7uI zmBFbucJ(l>CCwJ~AZ{&LM(aZlVf6>8d>GTq3exy7+cZ+@rB2|+7g+S=#V8}|#;_NQ z^WQ(ZDY6K~pvOml1f@jt9$sR`<@yF{<|G*yocjiVoh48jO42iEpDTGZg z35;+bNl?{ny^tnF9?j1$n$Z5|kgd78&<-;fIS=Z5|SM zh(f=ce!=jKPx>~zmqLt0w<5%$!w7R1sV5s;pVc@SB|`OXvP#1&Lexxgg+zSD#5>h; z*8d_;4M(Jeq6t-Nk_B{f_iNG~A1*E85pmQix=y3m^X8M!=y?kZX^EAv4hidlo7IsDq`5n`<|L@hvcs&3btmdJsyF;nLRFK`mg)Xr*_c?0BT z4TudHKamWmFptpwpxtYFgYMbLbn_5;LE+K&iyEH3bCP&C@D0HfXpO6nwcNXnwL6QU 
zDmZQZN$$@e?1rc_4tYO)NVLa~CLqLMLzQ>xTXd)`ZEMi!kt+T4tw&jVe4D0PK5j5G zdx_Y?hQf6=_cFpqAIb6a{_X8j_(jr^{;egQp;n%GKB(NZP6{QWOc8%woE-=+ZnGMXFzOWp(zXF7y;`f?p`(2$s&aiVzC~Q!2_9 zoZ$`Q9iGRND6p%0Eq*vD2N52Yn9mu2r>y|Nda<-9=v|xx>@YXqEy2#b!l&|OObT4( z>{KUbPf6{xPabbCRVSXvT98oE(W%wH7DB$U&NaFEXACNZC8M7w7ET?8QL?(`0rV^+ z6GB4|0%(w4MA#;2Wt5DszjCGeN&;b}@ikx0NW~^JjIh!WES`^L|0IPW&4dD1vKLWc z1ZD8(dgN;KVR?BqwmvE$L*CQ3P3agBbGi)jM{V@NG=K7TWnaLp;{42 z(L9@(iKtIeLe(EQWe}ndU(FCoPiFeLMvM%fzMInldAoXA`m>Y|u{!PI=IhUx0Q*D| zvd#nh5;rPN@c;Sw|ND)U9JNQbztTi{X4T?d^4Q6F)(680s>_TT3$K|3`j}V^~eS?tbVk5S9fcLhXyrV{#l3PxXBE*Xdvm&JDxRe8NRmlp5C-X4E6s z7B?a*#szp3ayxLGdKd$3O@U=Hcxi~=p4N6-WDY8fXKfOr`CvnwM)k`O*LE+NN|8AX zT;gbVf_SRXy{B(Ir#E42dU96>}>vt$Osm z)dinyM0elUsh<)NPRUB)Qv__98~Su(eamlRz)4D*Wn!nUg_JQYDBN#M7)hlllLlKS zV#?apVn}xsU!>O~nm$Z87CB?Mgay9w1&Dh6_IGL>!RTWf84t9p!AsZGnOF@~@E6GyM*_ z>B_^n*crMSUil>fJO3!jk>Y_i`u(g_E;UG6q!5)_*fxP4?p3wt)%`&!7U+Hbrk_TQ zPo}Goq8y1HKxduEdhgbvDR4At-$pibHmLNZ09We;4@08JY&u6)co%w=@}s=AOe4~s z$!E<5Q7?7EEIcf=s8nF(-j7iRTCE#%o8)v{Eid9UwuXVH`ogdkCeWS-7D2#UzBc29NIsoQaS~DNG8_gXcJJV5i6!d zB19*p>lP4?H2-KGbQ+9II==H@Go6q2pfjp-OcYhCjiwZQbr^|lOn;_z&NrK8#*BOJ zP!DM`wE7{@NHcm}yqrcu{=z($N2F$tAL&uMgRq}4o054DGl|!xT9UMlNgtW@f>D^4 zVh*p2C5@tIb2#);ySKieb>QPta;z70r~h65X&t+XB{x*>N3W&zyVKqET_ADA++g&q zw*sQeX8<>bMA09l@pe|~^&#rWl#c|+F#L+#e2QhTiZaW*cj^Lz^tzAWu>7GNq&rS^ z6u>Bn58;K>2x{q`GliRgZofO%a9(c z8z8{~7(m^1#1p;6!lQZ7rXNNG*Q?OX#!{{&6u_|-JTE+bL`%aOk48Ru)d5+ScoPiK z+NTR>saCr6)KiulBqhfQUwdJKIg*v0?5s8Yb~0U9Av0kxO11itJl{ zGppw~{SP!St2Se3obQaPVbiHVXe1-=xj*?of1eh9G?4M7q|XtBfoMR#v^6@8ORwq# z|LUl7o&k;#;pduB!LM{5KR9XqMIYW;>zoCa6c320lExwrQ$vg{fw+E4JIFDj)#I1A z8s$!D6UniQWCVXwXfg#+ztPMEFK}(v8IZ9?DZbV9JE>Gi7Ix#O-zgz3a)U*9VYXoz zc=ZjBeG>n$t8v;NRHYznO>;~m!agBpRI6Eg~vsqPZhs3 zJ-&-HFI9a+(}`N-lTn<0 z6hi@YMi=yzq}y`_RTZ0%0Ziasf5my!gSB`-6lPMU714IW2aKPWQhUy2!A00SHdQn$ z_(`7-Incuv7BfP$y0Igcd7bWlCg{1`EPd#t6&`p)Ob~aI%j6}@wJtEVVp*jilY!Y4 z7_mDUWe>5@7?_(^s_s4h5ml2G4K|);MHa&mMma4R1x%`te#GJhTdi>!b{@`^I8tHX%F_HW>^=T}x)!%yZ89nLq{GY#}8r9O$u|LS&qB`j6 zH{%u51jTG~k0T_hc&(2LnE50)E3{@NCc@_Zp!mdFn8YQxOzv@<5oxeavQH9IhR;^+JI*O4 zqlwK3bsYWVYxIA@hME^4T?mqHSVa(aQ!XH;8SQs73LygpV7$qo+6(ox(B_*kK!Nfg z1jrqa!#ksCOTZAG%V-YzWf-_@f+&N8XObLbq+j7W6-UQcRljJO=nTT3IM|Cw6|F=9 z=-1;u>d3lO{pq7h#?tC1Sqq9_p(-Hg=O`E-No^@V*EXDYKYc3(v<^z5vxFP3zpLkd z`lCl5n??GabIbvrsJ$xiQ7&C$Zr-F_N0gHl;`g$dUV2R;wIP*Z?z~s8_icn}@{tBt zQ0C}=@@b+RRpTh2(u&F_SxhB#hQNREfLp`m4cT!v(?^a z5?B~vq2BIgsfWGV-W!74UIlcMuL_JVCeQse|J=+F5}rG(++bTk81Jb z3Fa_S&&27Kz5@jRx##}fXmD=YE1?*jxXjshdWtU2 zxGCCd=n*QM%N23At4b`T`xIupArckq4j@2yn z&CIh%y||2->WdfAx8F&-nqJM;aWG1xr|G?MI7|bJ+fO21fBfug{_tf@7 zwW&vBq)_@WnD(WCraGScm549}T+A-F$)xDMeetOqCQDR%Aj50itvwq7b%ycco1Epb zm^5A-=R|`czvV8ueX?gd9DISiE>wt_d=v}$JjWV#<_o(-pi1eC5Yo(Dj>bS(`rM?r zH7a!0?n9s^O+alOgphyr<}*5?_L1MQRoa1bu|AV%P!jV@Q!(3@Mg+CtJa*j7=f?It z4%Q-An$8hADM`LZaNiVEowS#tv0uf{W$c(2aefi9N+WoSActVsF{4ax7N{^?I-=GoYxNE^)!J*N?kNcZ;(RG7h5UBivknZUr(VVU($=6 zyC}8XB>og)5T*?GFKP=)rIT8$=#4P}A)y2yAWPg_Wei^FM% zIHY}#5-gqF^I%!?62?qN$|kp`F@>|KE2!n7|ACK&@Y)C8>=n_|qMy9cpzC|pw3XlfZgN($XRD*Y=0#YPrZF|@7nKZgw@?fc7+TTtR@GJ%vuTdN_e>{PDy%FY z3i=*NDMsAp;_AWxQe-e&JwiMVvOW3$Hz7HOY*)cVK+(faPq}4n*Oru(5DAA8hgCmY z_{VQtijIi>2dLkfFHGDtjQ}B#M~B);FEi{*C~jbBnff}HoXNw98H&0bt;-l!^~z2C zqp|&>sen|`sgR%YA+IB|tbpdq)Oup*sW8Z+W##W!$m!TTJR}ysVexBZc|&VFDEUVb_HcGI@w+qweuT9av$fn6P7 znH~(i33cUDI}da%eN>Gxt~Z9Uw@h~tq!@vkWN`u^X#;|Yr9{tH8|Os#qd2%)rR+X! 
z6}x1dZtPl3E#9Fl&$skcZ6`$>4kl6q16qECmv$#Au0`|fum8_{00~_it7WFpuUA3R zfp8FhD9ObNHhl~)b23O!r0W^hV@`^W`R23F#|cq~BdQXGln1Ai;g&$eI5riJ_$-RW zn2bQ<1{3h0b4UPY9HvtDB!YuCUwMB{8($}G=oVPJRk zS^QU6M1Q%%T&m<>ga|ED#Nj(TUYOjh^7_N+LoX_<&5ugI9{Cjc^_5zjF=3{el_W)@ zI7SgFl@3fR|D~6ov2J7J21kvnQB8{JKXS~;yn+dZ>o8wXf{HQ*$gJ2V+QwC$`I8qG3)_JrTODM0XG|)@~{V>1t;?|q{t$~7;S~~fT@cyYvBdeP6rKv+>^#6-^c=FwR z{IDhlfWM@EAQ_*gc~au0%|!n4ZP31@%ff~54Yur9;l6-mvhBpV)1OtCJ6(iI6L$ts z_(h8NFg!GwjlTVPRn7@a2jJ*`f6nK7g2M-lQ1{tfdR}-?vb%yxG1q9;Ch6>Dl9Hkc zo!>_wNl}rUceof0cC(2*A{4D9BMnIrA*{XfDsq+tY5a&SP>;0f&UGr0kmZYq7w{mT ztgVkCx^43Ir|(1nl(uAQb*js;6_q4_kkeBh%E-$trq&>J&z%@5XZoGiJRhm=$UZ;$a=^w3=p>8C)f*;~s@0={vN)9& z=lw88UcY(M92oO!y!z6dC`AYtFAoFmC4Z|=(8)TOsu!!J*YNmksxO8%wM8CdQ6Oo&D4DXp6Z;I?+aA(4>mE-aL3gpj~9EO~y^QFK4{ zbp)^|$5xzt^^;O56qOjC=r=;uo0MCvGrf!HIS#Ta*5gehBB+}8Kd2aa`|1OIcKR+6 zNs|b3A{%pUIQ)Y)q=!W{39ujepFSSWR}Da564$s1?E8tgv>H-mGJf3PE?94XRsKwv zZbti0A}rs5z_C#TpD@Ao%$6j zGiO82g90dR(C@!uVF$y9XBo;64~w}UVlBtP!oUClq3bI$EOgszoQxK~;HH6jG1`L| zKQ&i;n^HoQiP7C|)}ZYodvp#gws}V?jX1C=L!b{&X&g|3Gs5$sf@wIFG8zoWY}V0w z7YwZS3bxFse3p!*p>Gj73K;vO#`(@3J=7o6ZrYFAIakF%GNWu@B+ws2``@~iXZ3&S zaIfF+S2&DwA|>V?CJ&^5yZ#1I-A}GQ)OwakC=Em7_lnLTsPgDzJ$JuI6ZGY*`2sa6 zF`L296tqwuCDraF6!c9Qvb?Ge8KH}z-i;_-ZVZR5i#2RENw&@TT=x&e6XoKxCiGW` zaumLVVXgwQpe%WC{rCDG>KO2?;pIglxxd^>D_J(K>@M?5lZZMU-LgVUrk(#HV>%Fp zUWMqXQY~}(3(WE8Cw!Q4s?t+kC00?)b^YUl-Bl+Ve?wig3W#{1E;zZlf5hCPg3$jA z3jV1$rDziM9N_5~Jp2ppa5&6PPa~00Z8EJ$m+rZYRG?) zijh9Be!R1~BX35It&ZU46K3vy09vRjuGTc>2{vhMVgv6f>FKS)KctNOkE@4}E-p7Q zubE&%jn>H|$bw-ie`py5QGUWlL7X-mx41h<2n~COleM*-5~eb_<~se_JhatrQ$$fb zW)Ya@y=T*ZlLq~?WMHGB7oQTR3((cgl(HfLY)-!?h$f(Qfz;LU|d!?E?w$GXh_&i}XorOi$~3aFNnxfrxuy_R^La zOEpvFJd)6QAjLS*>VX_%CmiW3LXY`8RL3#XD}1Tj{-5{+_lZ1BNr*;BC9mc%09ybh zJBivL8Rln=_NKomZRa+CRvgrz5D)wJpXT|iAIB%jtwI**=Q|vM;1wpBnrB-sVcE#P z4^}SoYR)lx{QWkC8`+73!$nl!)9ItHJr!LdROwPDQVeU4ptY7#<9^o*n6KjNH5fL< zvC0S>Es{rZ{k2^D-|6)Ia@eZ^#-+(i0oj9I>Wt+<2KgE!So6`Hqo`T*Ge#XyDPj%{ zE-7hC+4d4-W-AP^14S6L#33O3Ff@aMskFR`8TYGPH#Ow+{h-DI(8MgMg*HhoQ>NY~ zUnv~-N8x#Vt?xhO&jnPw8=>(Hm5Q5Er!;!-cUN`UsVl2??_+9|(U&T?P-VLEBW{Ua)ZgiBdth(gC*nPixy-5K8M=k8nFQ$VmOJnjg((?ZLyNcL*nf=M&EbaFB)|nYpXo_K${Uh~a8UR=DGBIiq z13$`eLW3@00U%?84UIvObxmWNaFw(b)q-4osI$NMYGj~&nd zW6c-nl4-OvIKGH7H^L1W5{q(De_c=pNii`Bay&aBtZ!#p2TcT(A)<$!j-D-8{wrLzFI(nM5Z0mUYD+S=a_#iq))VT2GaVy*%;x}PXh7dInSjPsOuKR7C@1j)~!SrKTK)C`pq$MTD~lCMN6pT{$+ z&j+arEsZwTLC*IElLSS9eydF}yUFNlOisjo!hrF&<972;69UndGv02ODXX#KyAA&x zIUGS)`bK@C7jl=XU3b8Y&D!4J&m;b;rrV}F1_}0i=mwots)1+)G4nmJhXyCT03aR- zkEVNyDW24cTErChpM6z?g@aTM`sQL_ggG-WLM6FcE0-OV{IEvN zClS25`AklJBluI=88aeMJEw#9L##kc6s0Xo#gpdKH}}6kGPcj#5O>hNofAPUc%rm(e&_)l-{dPdmH$bUk#39OH1os zjz;a}YkH}n@o5CYJBw(uYTEo^*^~aUi4$aYZ&Q&cBdmrk5gy@riX3E{6DrW?6X*fg z6HqRdX}9ZmEVWJG*?u5d@IGi^+*`kFdxj-Ab^^WheWl&(lkPlJ|ik_H-CFfS{z0 zBhrS?#pH}StyPv(C*H<%D&_8d_;?e=i3#EcAVLeYtfzI4*$UJOI0aFPsCpUeyycd6 z1UhQkWIg&EcvD!s5mBH;i??)G!RlEa#)ee@5^TCQh+wih(`t3psSn#MI;9&PKP55= z&_Y>95@SiD4855!g8V9~5WNg_*w8LQ;wxB@AO}?>xprUCf&y|`TU^kG2{SAFc9?y2 z`5@Idm{f23`A^7364%98siYT92vT@?q>CMngbg zSPckW`B7Y?PM?|*eSvyDnO}9Knw4IAsmm=Ri%=`TiF=%?kkF6c8sf4@j(zq?5do>r zr>(fr&yrN7^~>}Z8N1s;n9$ZeeTY5$%8JnX>@73;DSc!rg6?e4iRi%Q# z4W>aD#k~bv{&6N4D07;eBunDIasqoPiMmncjV%qWC3s2+v1N|3jCXHL9DS}E1h z7O-qT`h_kr6I18v2l6TO4@3wYfim&Z2uTJd$WeDoiJp4*_>Z(?P8?S3Na*2@ba?;i zi@6;|Tf3ti8WZ&<1W598^WlUMBqX)&t0h#LY;grx{F;?Xtz}y1ln%2y+jkbd_BO$g zT3imiK^CsRxa?R#*ZeWoB5*@=w0#V|WJxch4K|%bBGWVjx^R;I9d4$6 zTYx16>>1^%aRm_DWcC6ALF2QK3eP^D-5$^372j|QA<}qNR5CmC(7`O@Z0ieJI=ReR zvx*{!lK{0-vrnpi_ND=l|42VdP0;Jc;%QsWWqJv4KOk2udm@h?pR}}%Qdgd0hpS*h 
zva*0R;4j8KdC^NXb)?Am)dwaHko2mo8nSTwdo+ES0ss7SK``|dq6Wo(-5Jg>CEO3c zqTR~42*kp2+Ak^*d$~0x^_lBd7%qxfq{rBgY01&iY?Goikq-Ggx2VT1MuO5`T@qX^ zU|`%tfN-`&bdBX<6nUil5H`aP%@OfO%y?M|aXd$l9+qMq-m%unZlKYhc-Dvm+^nSY zWjFN*es{H}RR6flxbDZCB@-SHZ;gE}VD4*?<5p=e7=|XBS{X#oh~*7`CzMREITp+O8Hg(CDXr~scDVt z#N{z(u1`|3Bfyo+%yAM}DHIP*-;+D-|y_to#ygpYs})fkD# zCyxv)BaQtHnjYxpFwU(^<$@EQy#ECgfbq;a7%p|{e(Et0ccu9c-l;F$K?YC5OBa&q zw>l7RN3DP!g@Z@E8e30~>+I7X`!Wz$uLA0qsDzAAVA6#O(RxB#giYVv*U0Evx$Ne&T_=T|p@hA)qsUy&UP?%$eY!6QpBT+BdPvGsk>U zNpz*pY)hE)jQ}372;6cAZA?xXqcfJ zqucU);Wufct3UY2IP3sWm{^KajVD0Q^}yF(zmYEbHaQ({lHMlVX{$hK1j=gl>6Ohg zz>)(%ROx-AYGz&i+%1gI-&jLVg5f5%bR7tq=QSVf<{-}<9-M6d(&zLzc-O0pGz2zX z$bg%Fk)5d3+$wzgIvhIP7rg0W9I|(YG}Vl5G<5bu)2NTlFq191#sgWd>im`ShjZhK*9!-56(Y*OB7+qUm6fse^rvGnDq2;Ijz+8OjCWjHH)h)wnOH<^4bmG zy_HTlEr5H1Ct$53iycC^Xma(U(lfSZ^XEWWP!g?QPTv_v4{RaQ4nKx9TGRa^F-0xqbVX&jU7s`dSsz-n9 zW{m6w{B7#A^jngO-}9G3t)(~wGog52%4`M`)$pqV{n?ce96>Duhv~tG(k<8TU7Vx_ z9u3l%C2f{JZXtXnWx0#v(53+s#cNIp!0;&WrBwd$0xQgT6u3&DG_CcsSVhMs80D$8d6%9b_^Dk{g6xaN9U=% z@WCt*fZ0oneSdNwYwL3XL}L=*??l6;4jzLgL>SDqsF^->df)^DewL=YON9NB<`!A_ zfwQSNUdY@hZ@wC|6}YSx;&6d2q#0i%(J+0lOprbAMD@$cw)r~jyCAacipWK9zS&70 z;>>kpj0^&vh^)hJ_}p+~9eUlp)cnyW|kk5k(3T%2|mRp0G5L+Wy)il#7TtoG!BXv?9!X%IG_`jNv_hppn_l2L+TIuX_b^t zH(Y(L8nb{>qU(D2LZDo}dHsFA@?K|noPkShQtiXi9vMaek!o&TIT~}MG9mHp6}VzG z0}I^y^jx6kKEo$VO2kLETOx!mAJgmk6f$X?OoDGEMqr?f?#L5QJmDom+*XFGsBYn( zr7T*`xsq944M9W|*G49Hr!lt{_-9y#@a`a7H7uDySk5qSY7a8xYALV90(tTRI;YgZ z8+_?mZ?M$#hCu1I0z3jCb|#rHDZxHf2x?)I^EgqFR-;PwD+kH5MKp03&MtK-3J_H^QFc;fM-=)e z_WeSY;aCo;CrG@G?y}sG+|NCrq~6cytFop{haL34*A#QC^ae>!4@VaWoV0o{v^xL# zqr~G&>6Ydnm&C9#sj1Nrg>^t^dyu~+$xs6T$E~Y zA(dVmYUAGf)+%0rF7g+aFh|U5>s{=1U)aw?8+#rtVKfWSMfn)-zy|e-sNa=n%)=sa zRSm;-2iqhpOfpZHT{`w|Cu7%y5r>IB#FG_pH(L<^d%m<+g|K=IBCLKw+B%kLM%3u_ z5o;z`G>X3HMAp5Z(fDF7=g{M|$_CT!hU!_R-4m8Wz$Am>5t#qkHQ>U$S6eL#d?1*< zN-NBoPKta8Ok}f8MNx3F*;hzY6R0U|glq19B9$SZX9>3+_)JvS%U66Ry9DOF2TCz& z!9y)Pq0l}JH_AFt{G|yS^JD#hqt@l6uG&$KD*#&e=nXw`O>tezE`3*E9j^@J4lk?i z=9(OQIT2;Rh=k!}i3V6T-87>J@?q}iO}G9xzxv+vk@go_-L@;56ff zSYmW|t(7X#*+qbOuZ#MFfKB---{v|TR39+%Vi*8Xcw*jt`59eKVhz8qjY3+F0+qKj zP_2U>)L`IR4Os|;x) zU5lh}a5{y-rPgoy8%!HBs+txEr4imh_cZ0eBEdo}tcOQZJcaENgOm)Ypa}Sbzo){F zc^Y^~)Rfn1ZfTJdbg~z@1>_=>ird7SORj%-{X<>k?aj>{y_H}Eqf$){{=p0j{{z<( zbJ1%bQFIeg9zFdKepiy+!Zd;@5gzt8V|+#m53Y!eYO(4QFNwU!{h}HdHlwQ3%qA?P z1Vu(^J;9uCmrrf3M6M$WR@7&|#zZkQN%Lr5AbQia9=EbB#lCgKxa1h3=%aq|0~xb?0he^?)v_GlRaxxC8y?XcV!JIYwca#?){X{|tE@KQx66 z66LnTuc#RrjKU(OIqLaNjAG)aS6|5zuXXTFjIJ&7Kifd(>s8+EL;)S2UTEmS>y);x z5=AksaI*XpsBo`JfOdV9Lk@JL)_(Ju8`?NHogzNbo~@AcYn$`8eVVBFgi-C5yg6@O z-tkk{XX9XTHkqInee!O;{<;nW{*3V3^Pl@%@Y-KrfB)(uA27bQ#_}SNQx6dEBZ=zg zVj^W^kknh^&F8ye^#N*q^Dc)p64y~kjRglmx_-EeIwg%zJk z(_jIIL8Aa0L18aT*B+}&Z>D928X@S>+&c!7`6XwANajJCNLz}XS_estQn0q+Pd_m3-|;0n z5$f2>U7my$M(_7(Ds8iOf0Dpht?0m@Ecilg>FJxViYu_R>(}T>6iRj%jA@#P43XuS zGaIp8x~h4FS`w(1s5FGnlIf4pv}U*`p72S^)-T**r4<2ACHnNq7%kzi>}6Z4vo6IJ zj?`jJkEg%XCse$fJ6{+?9D|D9h!1hDk;N#X*D4`};ukUS5;QsX_fjIgGy$>m2VozQ zCV#9R4N4t$qe}$8E#*|6ahsR1lpOd7=uz#TP9OcNevN@Mz z-bFGRm#{#+)L9wKNyN0a)X+W1qgbQP?6?)S73H#}xd|l2H_~4^r2u&t+>G4B<@SnJ zTG5*mZpJUxYnQe+*MlA^!op}Xqi_@IT=#plzsnqj%!$*&$^>g%@5s}DnPj`B`AZy0 z(|Zn^wyOTHHs9Avr|;JqJFNQHMO+cBefn`pClWfXRwlVRGub7v_~qXZfK4CRh+F%xyBnLeRDNm`gJ%`p~e@VYQ`p~h*0wx#W_)vA={xYbHYw=vhy z+lBk{BL+B?y#n94<-4-AS!i4QYvNd=SyJ~`k)vabSjnd8gJ*+UArL+{u)xP}G8L^_ z&!jgIp%9^nHWg?13L_&0gdp^9z6RUc0r=Ss+epx*>+k6;3$!=NPY~fGrBnqB$on(@ ziL5Fn?5H&VV+(jU-1bw2Z2R|1?P)t80ijku?AI!B6#_= z`}+K?ThH-7?FS|yyRGbg(Cp*Gh(!>8iOh|_dDg6oZ4QxEC?%1jbri?8r;2ikju)Hy 
zK8cr9d&DaN{N%svFG{~~(8@F=iB+kIL6fARj}dl2#RsCSgf0l^gEizO#sAuaLN3*D zmKlakleM6T=2qm@xu&TPTYK2fTu+k|*MRh=pScXiT0bJLS#^j{8lI#Xzg%ZP$Z1#k zp*~BI9%g1dZaMsP38v)t)-K{1z72K3wY-rmsXb=W-D$fI$2RS zJ@YGBWT`3S*I!SLfRMGpN8_j!v+hdIyL#@xdj0Tu50QsMCLl&k-12zW}&hnXtf94>|#H*%>69ew{HLH7>lvhP3W7d>RcWexYt`V0_)Sseh5YiTcnlHz6{fUB^aWmPDSOF2byny%`4 z45T7g>z{h1vZcKn#(ztQP%na7gd!RTg!gP^cA#de1{zeG(&?o24T_ z#b+LweS%TfpQrC&M8rWinD#5i(u|>q&t8hC5-6L!jOa!Af9+jRD*uy;r8$832q9d` zRm>@YG-Wxy8%O}!diZbHEkqekT8>312P6jwB>)c0jDydR4Kw$s);$k#_-?r-jJ4Jh zw-*=npT+HM9TBmeH--7q2S=TQY)iwt_PYuDGxAM&csO39IF3}I-%0c()d*+ZMy1XY zoOg!VjB1T((dkW&TBixaBxJ=YKHkb^t}{NTU!)eakF4|NlM^gq3*Zhyc@4}n%+!b7 zgIW<3N-}x?nk5$&=k6@~dxw+SKbLlQwA3@lFu_I=Ykp44ILFb;^^zVBh#a6nFJ)e4 z9eObDF6ikPSwTz18fZi!gMF(BLGW5i@@ox&DJ#e#AlQL120eI~H^sv#OoS+PVt{sL z+6q8WLoEZvXvQFA7G26ZD4<%%pagm?=%IT2Ad7-&{}IQkSuA;9nx)l0tiMASs8Ey# zrH)-#)A*F~%*B8`6fcNu+%5iS>t#El9~cO_N4|W@XqR7vjfB6ErAem}gyMM`IioS$ z;0?2b1e+enTWcGdO}syR+5hoC0{5dusL?^mFe>85v<@90!RT~(5XDN}_?XvLH*^T( z66ICBtH%LjhE?7FEgOM){`)7tcJYyGLax>50f-vK)apjyUa#+ zY0v6qG7Q>xyg>vZ!;v@ce?g}qP%$>UKML+I&1u~t_@_D=87d$k?^8S{!tWnx;*kbb zvo@068R)L(yQB3^t^;OBf&7Bnfcf|5-Oh1X;%q?m!=QBi7m71V5Ilis`aSv)Wmmgd zG&{XClj#)dl`SycHKh1nalsmTwNF`*H^SsPHi<_VT-gRanM5%Sd23Fd2zaCf9+dI) z>Vxy47l!Jsl{17>gXZ-3)bo zDfx17LfSxdGz89{>|!MLQ!Ly=1&wtRMP!`T3_7i%@>=~sSE!mc625nvzVu4e67g(Z>_7PN8rM=frK=h+EmRGw4<$R*pzw^l%mJ`A9TthCGcC@?j_k>m++)NJ2n#=b z0dnm{{NSLoZ+l#A3DuONwgL=%XZR&Ph=!)O>v+_ds-KQ{=ulTnpsodb_s;-+;zcpQ7*jEB42WUTR^?mBf)7fO=NDf~cs0Uo zFU^=OU>p_xfdeGj%XhF~^#to23`srr`fsj235%=No)ZK~3^)eq$v-L83a@W@M9NJVnc8O1_B`;*b|bsTH)a zbr@B@$F&>JynDX*-UO2&^fWZXY)_P$bah;R3YA0Bh+jbJ;KFE^f^`cMfZYO=liV#<2R`A?0;=ip|2n<1@cJ)Up;u31?F=x2mQP;9*d9hi zyxDJ^VjJd0PNx6j?_G3S5yul}>(I0ppxRMtt^3gH*)*d{K7bi4K-B2=h0nJs_jI&H z#{0?DT%vyby~WbtuLr6?pPoJ>>#84wVotQn2!x?tj762QN%nB%1)M1}sRI4k4C?Cd zLssG#nm)p(a}T|R~&N(k%X4sY|vT5#dtag(+DIA@CFRwU?f;L|o|5 z{yK_ewL~?_(2X4wYiYHe9vkK!7v0NziK(nFr~RoAoGb`Jl&076rq?+Am)g~LB0tWz zl-vpSzjvjO(rnWGI`Y9U0kTs?InkUMb~C{JdIJKUX%^X`il<8W``fgT?{&Pzpo5M- z44e5DS(vy*itdZX+Bi)}A{sq?I2y!ogqtPAru)>2fY7U-_oJ7{RECZ4xj)x8Pq?@u z+1_c|GvR%gK;6o$cPZFp4B%8}+k+alYSV{I4emLm)TNFFwR4!13{us;Sxvr5?xby^;SJ)Qt^!T*gBtW`MUgN@ zAH|&n6@8Khc4%56_uenKfJCzcpGl@=vEXV%TPT{rYRz7$Zu2H3AMtDqlxl#0%Bul2 zni^&MA;}K~T&{m~{ia1fb|_+^VqPdl;YH^1ZYH4W|T0_G0jk%K9+!z9&%70U zbJ&S#!QdNXuti-IQD)%g+KIBZ#QFB~1@dt+;=kL8J{(Vd-kdoX-W)g-tU)uu0#SpJ zgtA^o6>KJ~W&aZ@hu@Q1ur|@bHLSBjO&@y5H2Cz}Aeg-PlZ!H95;a7;POoNyNGnn6 zo&Hy|`N?T5?_|*ok@E3LY>a!!-vrOqwC&$~Y8s$a3qNRqSVvw_Ty}66Uc)S1q<6Oz z15Ig2z71*UHFFfDeJrhgN~5zS{!gd&^yi`#kh&va6QyuPNp(h~yE_T^Q6B()Wdf~t zdb{#wG5|y*0GRUQ@sYKQ>JqPF5=5+txV{$$T2y_seMczH)JpChCXQi|{7Bn|NLwvt zOFMVdeEPwDj(J9q^xGlmunG{TA_7k?MBq+GlCieZ-=yDbH;jvWt9rF8pK7ekmp%Zi zJFA;n$G$)Ppq(Zi9vk$@gv>VC$Go;kB=6(0b{|@&Pf8$%h$nqgQtfJKrClN_^zq|p z1Y>d#Nnr?M2I=5;pIUQ#QQ#rPHM{zaz8MLsQkBUxHLv2!vp|NL5$Z(?o9p_|WE2H0 z{nlaLTY98LLkrn9Uis3X%9+r38uDx=B)>%x&99joT!cyo`Wgrs?DYx_S)oI(zq5y| zFR^{JaFhk=VALWbVj60lzzg&{lxX!T55wYnHvZ3qq02KJj8joXOx*&<(t#>uv|bH2 zlK-pCk>?TEpwENzZr%*I6!O@DcMH{!@s$<&+>25-UU|iICr(j)DR9SZCU%W@Ob(^6 zvGkYQiY}{)0_4>}iA8IYtB73*t9;F%>NuTStuHV*wsoF^EQ5QcOyh|bZXsBWF zPyInw5|28-$4vpDKo#>5#e&+HgB2dIPV3s(CS$-KYxBYF9?+vNzJm43MIj5Ux6r+N z)==e?8Mrd3Yg;h7No0VKriW*U);KY!+S&~}fS;B_{^?nuzPo!NGnpQM0aY?}kG?@H zo*E}}%1TGBsky7&x=hH?$*k8C#n`FF+GJV_tSlN*uphLT$T=NwL@qAubeS~Yqj zat8h4l}SDulbN7Q6>GG2j#PaqQDGK9MR5_RsTuzv4S=PXyuNjYsG*Jh?0zYZf$Ee- z)XqfEnhlk*(SF$ejG8MI_vpN=9fYSziIw~jBka6zev#dO7Z#Tqch+`new-2=fn`n) zGbFV}kOMEp`hZBqL6nz@=!>M9o~I@V0cm_KY;_}ge08$_q(5?7Z6TqX z7TpGqfH!Svfb&`mM$Dz;+mWIlp}$x 
zU`U#*Gx>8Ob)U@2QZP*-K8KG(UmbsUi*#HCnls9Y>nJfsz$=iVVM^1oTMzH_l{-z> zonIDLosUpCZzfn=^72sl87pAw*jZwC5LB$G*vi6oB;IGWV)oXFFCKQHsQkE_sLe;; zPgpkrO^!#1HzM99A8xEpFVqk83fuK__wW0!bUdK9M;fQ2*}#B+_LrM}^Yt5EH|<&i zL?&3kV+y60s0Jcfsn*HMde=Apv#EJXhq3C~-k~A8u218(pG{6Ff%2YeE~PJM zF>WfKohbC3&JNZ--6`?Dea0K5az;~luHW**air3rqYj=Rxvs{`FNH10d*~VJLNL)T z(_u{#Z~Ob+#mT9eKB@tKrEX$k1$l?&XF&a<4hZJV@}s}^0@R1PBxF!c47oMK6qt>I zXXC9h9TIBXV0W2ZQ;PVwx>XuFT8pj-@RgyAkMUZjq1N%BYUJ~vG&LdtM&LFiY?1>IAmxQWHTe&?*q@}JA+WNP8)hiX?4uepBgiGdAdNymiVpqgN z{7|MU0W#HxHn$f_gC&lA9>+&8I13=*nNoz{T+{|C^(arai@1 z!!(O>elO(dXm~5IU*$o-o5dIc@F3QqwQ*>Awf1Lhb#2EqQcoz64_k}-nl|t8dFwX?=0xI-70Ea)RgWn)~! zJYY`WE_?xP5zw=ziO`ELWw67y`uhO?Ju{n^txvx*1AD|M&ybwaW3JqCVIbq)G)SOR zgpw#lvI`{+K~~>K8IjHmIUy~pHyQYmnuZ+{#W{{^-Y5;-dC@^9+DBoRC~Kh&;UbTx zKh;liuItlU5V&_znr3oiYpgwi$Kfd+@-<}=FCcnEjslEDv zxWH_2mSUeHuFYc^Rpwt*{ZK|v@FW#chZUnr@L=E4tvWcRETc9B z(1{65&{<&(fawwi*bmwgk1Da?U&E}ndg}m&wB@6$f)(_%doq zD~84iN4|}QZY8}MaAA^kP4H8H8@5ucmDsRZX<;=MYv56G#Gsq*`NGm=syY(JJuhzJn} zk-tK(3YefnwX|KE5S}o@%Y%kV<>7#pQ~e6JT*YvqSWZ%7-7S&q=>2W7tC`8(cQn3##K^SE07cpfZ!~ z>Yr<;vHLe(yYKUPHToyWpN~gSapR9KV`Anve^aL#<366Gh{MVocC)~gNd*c6YD2?r zsXs#VB$kBR1C*}Xw|6Iz=fh?Dq-rezBY1$N$YMVcQ>xD+S1M<<>d!_386+Sxw*%8@ ze4N*;R_*!`V@C{Wsl3sV8zyo~Gj-@*EGA!FUfGH3xi^ReA6>HdG!EEnS?d&e1g_Og zae#Remyo)bnp$X(S0MN5Hx1#*K?H=BS-v}wq3TeTx*s(@yVuDU8g0heLBd!O5#8L# z?O`kHy_kDj737E{KKV$KkJN*(A*F58^y3O+g{E=Q$SG3*VFd`FfYQD?Gv!{S5GNc@ zUhV~rj2C~z09tPdQxB9;O+@}Fuyc>l8KFqO0)XHJ5x=xCmHH0KBB-K<+0VS}w~pG8 z$Wjy25T?-wi;v;>@g<#rvNCl>44`XYDHY}qj|C*lZD|6jVmWh}8+`ZL>ofRLk3V3qK494l!qJ65NYt_uoAW%Kq7&DxG7-hUN1J?J{ z%2drAyxVO;HMQ-B3?T7~7(w^@xzA_?m;=N7I))Sagdn?!Ak(=t3W7FAwHwl%A+Q{M zktWJ#XBN_*3E zoGZQHq&n&m@6TNQ*Q@vQFVhHj|^_^96xG(B(nOI;oXJnRrQ((eC z7Yzf{xSq$?jF%!t)Qd4QV?5nUA#+`7r>KS~ii$?!ren7q0yrv6VHAI3Ta8!!bFMj~9)Qsy#e*vS7xHP>Qh#_xQw{JfJ$i zO}04wNl0$s$Qc;b5tWo9vXz8{7KHVt8?RlcLke9hJjz9l*O`M*Y91&IYnMsX7*t&Rc;c@!RIY}rZ`e+R`lO<*wr*d zPLSa>@rCp%naxu?14-+-)gAvWrat*gM4Rz%_4oNkLvPT$x)fr>LU?`v!jY4WhB!>1 zii#FRTvn6`8jVY1iELvh+iye*z?dBp}EwVB6GgjCB^#wt?dlFy3Q%# zb&yJ0g5rfW`t+BQTqYJ>6$*50`tT^AvO==eXb{K@z}cZ^Xigf}hr5|D=H{Js69gcJ zsQT!`c9d>R-wn4_MmeWSGR8M)o}23*&j#HGJ|l;&&k_eDlQ#we22+#s*{z03z{9S; zo%HD`vFGc~x_n%guDc5-yDH&CxERN~_&oUG`ZwFf-#< zXmf8LX%{|GX}0ChkxVJdgfK-Y5MIB$bw?+B*?she9(v=mH@y_VGGxl*MUA+yy9nwq zKiL#|L4O$xvtEbfIRQj-&HCQ5f7&RJaK*O0FgY)NgQ}QHhYQv*Dl1}OJoUHwEPbJL z=>8i0AH#WR7DvGwnp<36-EfO}=@!h7;%#ej-GgwQSSy}fQh?!rS-RK^f5)QeB(2$= zy7z{aPQvd{-jv8g6e%tXA?OIHU;#}kdIE>bk8jHG@_|zYQ>O0v-*AqAsI?$QafAv~ zpsJyiZOAn0B@hHNMUl)-+vUhHpx{rYj3xIohk!pvc2uV`$@}=iV3c+PQv$|ujTh$T zLw3ARSB@CajY*uQG6m^iu}qcIl<@e5)%X1}RQ8^tGlKM`J}>7ZWRhAoO%kTIpxQ?Z zTa9ODn?BOv;4;IUOMFHvifC$Dw92pfi=783k6MMddqKr^*#P1oQ&@ytS%?k-zX_F7 zW(p44X_8}c;qW0>be?o0Oaqhd#GO=L^e$9yuy94q!3zc9)SEsuO`d+mi=v;LT0qA_ z9-o@rAl_Ci5sEGom!&qhG&-wlSy4+M1tJBy0nW&>6iQRoqy^_sJiNZSq2*$wWweww zmTILDQyBPJANusI=)K!dhFk_w<0~eWTbr%vh7i;X6~~Qh1i(XAuN6rqemb03w88tV zm&9dCpLMcsTFH%@pO3b{7y_7{(n)LZeCRV&N)580a#J^~j)pPP?WM11tqckC@iS8+ zZZtu2A;Y}jjUc{1vc-w|W*rL{=f$D=bxN31{v2nN_#e#*0u$uJBlhR#lE6phY6V-9 zd?q4!m|@sSf1&~GvBa80U+(Nq-{JoGHMA;85(ECqt`$I*2%9A0=8#%MFBway8}d>m z*8A~Waxi;cfL#u%bH8W3Ws~g>$R&ECu!Y)xkZ#brz@2#^8cCs-l+wAgxP2M{m%i7% zI1h=E<-7V^Bo;AA#ler;2nV40Qw6)mGFntMnoXUoEDvRGr0R|MK=WR|>HX0=wZ`y^ zaGP~P9A*NV01eizUX1LS#g>UkMlS!nv#W0MKBQuzSfbIbT&1@eo;bo`0NpJ@FjPVR zVbG+Hxmlg`^TAOk$L}EpiGpW_RTHi?VO_fkLOgn7@+dk(YjY)}r#)FjuvBZ%n>XC=j=xN_q9|W*0uD+W;|Y{~m|$@^4`qqz$YaP+(I5LiX3!i0D;` zv%YBh&6Nu7wc5C!BJPYfEe}cOYLkOrgj3t2Xmoq)&rBBOH&|c@!*$OAb)z(K9^=dp 
z@($C!AV|wZUx_m^e|x@cp#B_E)gLrrD6YvJ>@Oz{`U&|+1}5%wn50fF;BEeYylxq8b~kqxt(!z_#);_YCN*yINC6!~5_r-_}E zGD<;OWthByVP_H$S@kaQdr6CB*Co{1Zo$IFt>xy&%87Q1C#LU$3DFO*`XKS%MEi~i zP@ZQu^nk1=rJ{W>!iFDHQhlhZb+2P#v6-Q_@qnu9YN)(`{0?segE<6t#09>26~3H0 z!lK`7ZkRqwRmZZFJjg<_k{K>AFtXLVb>iLC_Yp@hrw=+Z{(n^DK|7^~amrOqP_CvX z9&f=l>O(4elAdVZ)4vAHjPwQ>7xS~dblk4~EBgE`Ose1Hn;s%ZBd;$KYL-W?mDn8+ z>R!euS15H@Z$+lB#hnlxryr}qkXZ4hmv8#mK+>q9P=BTmDIq111FuXzbKox(O^sOH z&U)Sfg;>gDwJ0F@>ZGvn7BC#23+7(t2_!XxbAc&_A%8|l(;r*2Qx*vhi98;wY2A@L z&kdJKcbrJO;irRBW;H!-D85*#L3#gv_klh|vR)x9ZnK1TMpVl*8KAs?pLXN5@U&yT!7OuK;Y58 zX7>6N>61YT1Qg+lVh?vOYfPSDOe|UtP{y;Swzn9Z-0i2rT9?mk+Q{!h`;iY8L8HCb zaayyH50>mnAfi$jxMK+~l%f%kgCh_F0t^p}3G-@jkV)WPtwficQ`Es?pH_L14iHgE zNEAOuav8$?N^1dsN-w-1{hfKA_V$cA<4Ib#bHqUW_bp$s927^Mjox|7Tnm>$tzt(NESauW!yPyLN{g>XA%PK zBe@1c`Wf`7bYs+I$`Ru`H&jT5ek{TOxVg;fvqtsKkY1yQ)pke>reEEjX zexG!WFXTK-OM@G%2>k&JT5bts_MlxsKa*Wafb>>Wv6gz4=yqIbUkNJgLPI}y^BGed zVF2XO|0tLjlLVDFq3>2FN79z|`OJ{Zko@^5Mj~z8S)W_n)(b%FV0i_S^3LjxygI?e zMNxH$G~ZG)QIY0r1!=Cfv^61j^>Z2bqSj9OmrUkOsdsrcscrC^6P${;c#VzuMX#EG zzJ|J#HYn_8-#ce|Su)@nH!bK1w`Juq(faV!{S<^5LkLk+))CaMaT7vG_%}KH99e1N z0WzX%pbnU8^=gJBg?1@bB9q-&Iu|oH+8daCqARmGOIb3N)md`I2*BFxLz}tR*%ev+ z@lF&&7KplHGLy!q#yjjM*jH%4|OKpj1LWwEhfk`i+-y-4yvNC}BGAXcLt?tzpf)kT5S zRd1z0zsLmfR`=;5C)B#XwNp_1z0&+!Z##Bmkw-2VAEo^1pdATHH11#Xf-tW2(i+zy z99!mB@>`AB1DYigsnF^A?@KbE(+!PCXq32d9yk$e0)Jl3%6iiqrFk7?Nf1Etw%0k* z%G%r`pS7?>NiDpc(BU0zpkX(W%z ziB>rOe5oP|gA5f8{>kf5tLg;#n@TC$hB(e{AjJDux8sqc%ZRxg4iYDFGW})2Fa+#9 zOkF;GJQuVJC*|s6^Ih{V*JhysjD~(EOx63hM54`M+q~Xr!}#)-qrlILv|h!KAkLV!tZ6 z(4Sv_{VO>--~~bPwNQFp3)drwxO&?>CKOamd-T0TWyrFbK^ztU7bIc<_+Ok(1j+mb zjL%q8p{z=VB4BG~CRMGU2%y8adhwlQozxefdd*i{^076IK?)kj=7(}OnP3ivB<2et z;Oc}wwc9CahJL1T3DuII^z({*Eorb{1K~-eD-(T8PC(2xr2~gVLSwahz4ay>%Q5)DO{kC=4ZL)<0heUWHse6+)`-c!4#{k4oEhQO64xE)hFc@D}@ko zfTQY%p_Eci)78hDE1k5HNz^W}_En^$L3;+|c$X@MRF-PUUkrEQ?@Sc#;W(=w6n4RU zPAuNG!m1($%&Ad;elMRAI#GW(9#7wcB%%yx&sN+)Yt02>XL?1pvGb+tcOp2AEqUGw zR0?u7Bo!xoS_0So4q`81By@_w5F>J&_Bc%lJxhQd6q)?tBe>!9!mvH`ycVc&&$AQd zmjU=t?kDdP4iD`z^BPAPT7{O*bpku_`pu_q`Kku~StUuSLkqabw1g~^ECErxyed}= z)O-U)H6#+lEhP$^28*63R`i3`36n-KRFi&6>3!@f1P?aBcNuIv7pwzpgF>OJLJV^k$$+?h>i7xVqm<6r_|b}&(xWl%3J6Gntns7jWEh^9dKYo|tB zbozxU>2DjF1az{tFVBUD)*D*qE@JKItA&tItCF_8qQ%?rY^zS zYp#T9kZ)c9f;J=sdqqP;gY1$FK*fUx@+(J!YGpJmDzQ?&*?oLfg<<(GBU zYXIZX%iu!w(1>;7+3I2xf5<&CLm=aLh&MF9vN(TRw8#D{6BD{oh#BC}g*3{@F{j|) zoiO4rd07z(<(7Og=zzdj;$jE&j}i~9QV#_n>yUCuo~sW{m<1yBqS(PCL4ZKEPBO=g zpi)`Vd&M^=s$Klf)i7c7)*ZTAw%S{?C)IhX`?MZ5{pGQa@-t8B9}J+>e*oZm&%5-u z4UGpB-?wVJgr&C8q9VqaakF7V(i;M&#DwFrkTVNn5iE%)1R&uAVW71zXkkEo0E?*N zlbjDgX8Ac+KV?h5)0&=s=FhyQt$k85{#jkqd#M4UrxYDWzpKX*0;-8JI!5o1@}}RD zp0v2#G(i_B-_~!yjOF|OMVB>i0tlo0k^GJrFAnEJ(QksI=s(Vp`Oxno0IOc*++-Z}qB!Ay0voHuBST@s zLm}vrV^vNFIh<^`7sf%x7=cU^k4RIQHgQTXhRlBc>FEAKV}kQcLV)>82mx)MvIm(0 zFC}2?olp^x^XeacoRK_K3O-lu^^2Ay1u<*F!aSW~x;&s$LbN_Q>FhcyC9VU2Bn`@D=Mh>z zT4?xpjjh$B1%axt;^$8aODkdEPt8wuv%sT-nDF7vA4(RrWdw;|M`Hpq77okZ7=LR0 zc7RDIIEMciW??-fV$A9)T#_|ekvcjhCP2i_-(Z9rDY(#&2nlqc-nTUzw9|)g%F;BG zUKUy~`-d=b_znx8=Sh=w`5i?A}Ux&G&tzC|`t7bceU<8I!t^ofs}{(E27QqBhH z%S80lV+<@|dedJ5CeWttzy8!M{TDMOu_scWS08BI=O97SG++`NW{Hy$QA5sI?F~>Sd(0GP@~jbZ?{j{6TW9l5$KZ3+3TQ(;QZy zyG)y6vMGLX{i6~tp_>BZSWSF1rPsSZE9)jT$t)Kwezh@Yvfm?QQ#urVB{P}36nFZ| z9DT-Xz%>$L^6wA3M~SJqjkxk}@4CGiJ6#9I)ipmV=TZiYYs1(7aQ)`&^XU$8z$Cj1 zG>POAK5+7%`mS*1g;v#Y;%0{9E0(}GIZZBLiY*RDF8>%W)bC>IW0v+c!}zwW34foz z<>zo&%Cw7FTU1)nSXAGu6Gltcyh-???MY;F2$9i))C@9B`xn% z9s!`GFpIay6H6rEnv{=!x2^%be(VZP`yj;N>VeX_ULkQ z>(~K)ZXcl#(G9W^1Tk6}*j$@DKuwAar!C50Yya>s6SV<#;gv_Dct!b4uX&V}e_WgF 
zb!Fi8v)q(KN?jX0Mkponc2p!*Oj4+MTFTguN_qomx^98;QF)jcebJO}y|JY?bx+Yz zH6S3PTetyziKouMt(}AEhk3-yB$v6v-a>CDRVh$EJ0g4qRq zul}(jCOthvDSH3la{tB$qY{cPGA1d5#qPi0Is6tD`L$%`@g%XejU}yrbz^67TYtED z^C=$|oL{C)s5>3LkZcA2X`lE=2m>YQ`t5ltf-={(UVGyov_fHkHkY{Ll80MEaiCXr z56jihdlB0@52$%@LXEh-qH1+rFM_1~(g_Qi+<|-3u4SB(8ingK{H58-6e~gUJix1e zx_&#iPj^6+%n!B8#+O#TiY>y(;+#hQ;t*rh%fgdur;I#h(4Buxw0o2agI<{JRAK*A zRah3q#KegpT!#CO)Bvd7L>O2ef>{$ckioi1Vjz;S2?fibyf-m$$WNPKq_W5~eK<-@ zhC3ABQNjCQm=%7JX!CYyWR_R9z#H^B{2iK)mmzWYKkwBcd6oMsAOiS{sIK0of%blu z^1kNGISf?dZAs~vs&nn3TH{VFS3uvx)i`>Cm7U`dSPP>_(WEAiU6qgi?KiC7O zREm0K)B}*LGAO#>&*TAWCHSTZ-|C4}S8I7ZN{B2n#9sF?!pG|dh)oiqHb_?g_V^?R zfs>x+t&Wra0!@)+h-+yBuxJm7HUx_N`OjrI$_+-iFaA1$ypw<)e5hp7(WGRS5aTaFYU2 zScUgwST!c-q9CtildkC1@5E@x?2N=Im6{oRm4TE@Em!_za_K4?1S9PFKMuLW4q&=e zNp>qEgXM2gaoWnfFW0&>*JLxvz0p>q^s{ZOubJ}1^Xk0A`OUSE`E5#Id;-+NqNWjxmQN&Lg0MlTT5vy!(dHiV)# z%GU$8ndQYX#ieTO=s&}Q48g7qqGQd;Bt~nS9QSIZU{#f51y8a7GijIH-_qKhDl3>@ zkE0yXOOYYhJE%4@wbliY@m_?}|L9B8j83R2wc$kp=yk@Z&nh#b)Ux970#b)aGI715e$<<&xc zur&U3Y4{r=HaaiVQSLEczF$)K@FitD$xYNh#E?yq8aWGa(dR$Q0A~JWW%`b&ORU-F za{lNIod5}`R+bddFw}os8f1L1eU2pXQ}4(oi40Bz^zLkLcrX7SS#Q@J`B|NPeuA9L z*{tU%jtOos<{U6kdY4;osMOuYu^&@!l1fskM3qWv$#j;%7?Q`lgpfFK2nkIf#F=px z*kEk1VXaj6T5~jybT)I!7nrXwziZ#uzN<5nmBebP>i@s*eeZj}UVCrb5)ePt)jzQu zyenKmO$7t?b1%d3kAy%L66~>?twZw(0%a+LWlYeI93u@`e~`J&Riy# z62p^GbQb>PC#+bWNpC=#T`9E}5>^@&?$<9*0zmNMOD|= zBtasYX}&s{zVk4>1o?xpU;=G%P_nd&YY^8gjS+?o<o4yF^3vBPeiEuTe-u2U63nv6nUp75RxzlXD_b z=odD_r_m$SB*)zk2(X>x8*UG)Sw3bYHr*3_-8a-_S)_EA*QOts?s!|oQ^N7s+-^#GtWCTw z%o`kZ@|9lDN_I6+Q9}513@Pac69M~HOLdcLv*3vkJ2#KhN!}4sBg97^_fo{y*e}x# zf}v)-*HV050ELPlh!dypoRr?}E{F?xOtc4!Df@p)P-~iiDs8D+CIFrS4scZ+ z_vcE0it!#0dy0s-d5uLO1@w-LAU~&u4@LQ7aYqp)o1}{r;T%Ox^DT!*HrV(pd_lxD zHiw#uj;V-M@yQaPE%-!tijhS6Pq+5>9!&_g_=vEeWkcphrEt|VlkyRDb~6bPu+;7N z8_P5cwJvz_Bfgl(#g`-Oon^NjpafOem*ICl=Se z%*#k41G5w;_4vb2lHf!GvacFZMjYr4wHT@PZCb zOOBY)cWsPdzRM^KSJ|{BuXft+#vTNuRI3p&1ArP0(yWkBWQ&5T2@l+8G#lobw&GKv zmZ@AV;5IfMNu!A3q=nX0=YCouiKH0hqevEZ3e@#Ej?s_XQNR8WnRq=gwvK60_1~%8 zx7}}+qM@TG;-lIi%`yevC3>xuI48Q>t?9}1#bZ7VcBgL0jpv`#?GzkxR=R9@EMaib z4mJckTxl*j8dl)dKfxovvb^sASgR*YeXDN?$iyRj6E2Vcs7df=K0Z1YQfE*NSK{-J zB3YDbuC?jAK*W-KIwlaCdDt8q#hYHn9J0Z0;*4rNElvi;ivK|9u0%J3O~;v5jp9hD zM{l%Q1~v&>3K3^koa`D?NMo=u()tMm~*Nq2vu@cT_)q^xmH(wL!!l^@{Ht zy*n`r;<`T+u zv`)~Een4qYh{elDE#A0NBGu2dqIR=0NaEwVeqpj0x@_eDcCg@B0bzSZuM- ziYZ1;orH$f>Q|dPO*0TSIDHYtV^2Px)3qWNK=YC7*L4fZ`PVA-xmK1q3^4Kwn`*M9 zpGTaZ%!3z!i;~qh$FlJ8QbNL&l2m_)OiRWqukK&J?2D&BaURD!cfF=}H(Df6{2}Ji z9Vh3(uovM|^#{&;_s5I$h)JEXLe7^n@k?biS-MN~!2==PrH5q0L{63FAZEIS_ROT& z4PVB8xReB2U8mw7bWnk1Ia^1-)D zDx8v^QZ7!njKr6Ooz}q-OAA`oGXgj+7_e4-pYDvh8kqc%OXCCSNxZK=13%Y){_KU9 z($2-!pd)g*P~hn@Zu;yXT{eQhl#|vh^cI$AJ$W~0Gl9=~`x>Mi0uaSxG1@(fP`!pE znf6-5Vrg|A?ec*5UV{ec*>)wq| z|B;V^`%wAc!BFZ`Q#81Nqp7avIH4m0%=yrcr$5!!hD;7g@u7%)IR?)0 zG&5Y!khj-&uD_!Qv3@!o4!EyoA`D}Iq5g-g&l*xZs=;AA@Py7Vs}NH(Upd4w+N%*d zWUwBurUYeM-BB)r07XhUG&|F|LIA9i`uw7gU#gy6X2{`ifV!kYT~+=wA%;MC80ty5P^RHMSS^IkeD8l5h1Pl{s8r^M9HEbN;N1Xm zga?El?N^>~41Q&^1r9fq2b~P!s0lC-6&Lrs^e6rhOQFzRd(cd)+(Qd1QDm5uf<1xl zI_A~V+&eUv1qt~xr54k;2cletV-<+8*VUfv74?6~V8_gJs#B(3vsP+rh+NgYA%6XF z)Mmc;F)?Xmm7{QE86+CkM;Y?;**`PcWa|Z`l&1D8xuFFDsL1xO&FUwuz*JHgtS)## zOeAq25mj_juQJ{lq=8c`(U04D_0W=J@DQEGP;sJ>E}5MZtF>9spG zT`RU8LbaBsatKMZ!Sw#9Gee@y)2U1_VvF6FZ*W3b%p{f=Wy={8MB4X{GA^xHo*)Qm zgO7V<_=>lIgh&|3xpbp{&A6Mq8kinDFbGfkAs;ctKuRH+Q2^}dCt@eFhE@`*Rk14N zR>~gqe5Edsi2BMIrt2V>a1o21T1H*C=Th#{$)z^f#)QhQB?v?HtC7F0kAL#db;wV> z@Pw%YXu5QpHgzK0Yw<}Wv$ko2V&LYyAwXA9zpMnLIYS675_fpl81CbS9wwZEPISV| 
z`m0!dia?~0h>VEINcXW|>IdJ<+PCX(3wd$%ex95n8^k3-FscYEkA+~jcD1~?5tL^J zrT+p5HMzFYjyrikvA-V&YC`G%^nd7j$eH?YxUkUz(XvTiE+8vB#ry!+XH2TgY`=nQ z{hH1U*1~_)DiD!jxMEdPrzq0j-PP}LxUw-#o7&rSOI6`i<6>Cyqp;RgbEdDJ1!5D8 z8L*@~3wjU%&@?y#1lRBANQw6UmtG1eFtlmXqjj{G`H<#Gc8;P~{wZ+2(x>c`pE3m) z4-=+f(msrpt6z!_ZwDW04dcTHuj(kduykwm?d6rkMakiXdD*(mFf5_43@dW~r*C1{ zm1hi-Ip9$Y+(3w3mME3ZAF-7w=%fCD7SJBb?zOy&tD+2O^vs>(qMNxmGy)%DjL$#z zWHKCuL|0M8LGgS#7SEOOSgafH*lA3^poLixeJFT|Qn6f2P4#n{r&KeB54jRi*T%eN zoSN$}S~65Zqqd5QXQ8iS8c>j-r$z9sK1~O%ow0_?ZiYxobc&s!XY^=7+^Lr@rtkP= zbMzAO8DRqc3OnPwekyYFbU*A>o;p{fZ1N$hXu6F>jai`u@u3)2G~ca+|+Bk#G!-l2?VCaFnza7 zs@dkYDrYDvu}xAzdWVAk&YQA+E)?iFsk=qovK_*BUdM8OQpLX_xo_90KPQtOK?z@T zcY@RKBeBgvRdh0GL&N~W(tf6I5!%yA#2o!_OW|E0S^Ig4b31op<5XVPH~&NM^6xVK zQ{21vu0HT0X+@JUJ`iQxTCIW3FnI&D?DvX)L*^I=tp0x1&+n|MM_UcaDzi>YEE^2l za#$>Z8?+rIf#s(nMlrE!NSF-IxTF)n184(sXoJr;!y}S(qZrSLCrG|U&D^jW#4r=8 zE@FE6vTAt3dYjV0?h#Pf8ED!XT~V3`2~_{>L;3)M0>%yHH8(Z zfa$CI1!N%~Y%?k&E9+Ax1?Au2N5L<<3Oi+&%t z>$sepI8BNx5({^wb4bX+dsG0hcDyZBGdgXtKqN}^Fe3 z4DLUE?8c|{E)Vsp_+5qBmCg!F;`%i`0&zsQ!w^$m9etQPp zJC%6TWkTCrX3T{4SH!~1PcFug3hyPocvmndYYaGnmrsWRz&=_uCVYOsH_p&1tC&RL zHP0$#3MeA`3WynUCagu@9v>F`A#+Y=W0tw${Rji>M3W@tA(R;OD$n`{;=9|MCMw9d zs1Q4FSs3<3+V-2{XRza@$#-~nQu}^>G zY^&8ezN>yz;!X9xiqO$PYd@`!k{GJElIU9}UN~wo(lr7!jb=7Ry!zfFcK&=nMgh4? zjvl{HiCtQiOuvk1KW8dL5Nb&~j{}?ayKZwnDVbmrI$>e;f|v6l07)oNhTvuTm-ral zJsQ5o_2>}(&~wqrpajY$3346gCrLTDtKkbT+%Si4x6-#GH(S$#VR2*K`9tk#3Rv1^ zBqbX$0xpl6Ab&Td1mN0}2<7w~HqXpZ1b?1?sWxI(7am>SjFT+VP_;;XNtwNNn*JJ$Va5|dLc=M&RVqUS z@G!+k8u{HHmpu^ijp{&CUGaiR4p_irNX5_?mWPQSYrsw=Hy5F%Z zO4OFweEdZ!8@YXsV(2)?MJ^U|PqSWb)0#xTa zTjv+WrzvLcQNEvm8xkDv$yc2SojBUII5q&P2%AuWAD_>;<{nA{Z!P%YA)pgakENeg zb?2m*Nnv5GM-o7?8Jc(&fIS3SYn_ z!~+kf>a>)0(Ig<$0m0kwdF??gS$%9;XCiCd3hFUHsBZhYbsSs{%X5GL#&KlRj8 zzQ60ZFQ5sE9~oz(3o`5Z!kAzapn(w(Zv5l-?tR3+1Mt!dv@w1F{m4SN(T*)_FunWX z*Qzr@9B?>F7&8C~#2W^o-la3X`lUYp^kbj-oHgHIc$60OSWVVacFflfQVReem|UEH zII3}UV92#l38tayMkf+`i>Q;=zKZfl$-D!SB-8@D92IT;42kdactHSTGpB^ zyjU<%{GwdQS}QDY(DOoUU^V+zHiws}cyA95d|f-{c4jJ;3<0$~^b-prHz*$!s}dUK z`F;d>NwSdo9*yzieUix~4!N4OwxZvzbeuFqnQLf_JjgPxI@mELgw6FqBTSff!C zB2La@VFGXj!3g!}zhua3X8_heJCMJmy%Kldylwyo&sYc z$%}j6B*X{(Zt9-#fZonNpB;Wozvqn68A%)Lq%zc8kH2UQ0?oqBh~D{9{Gn2)sEWHW zee-09U#s7-t&Nxxr5~2o^mU|Xs()orEF3ihs)P5Z6I0|1sXXJjG;paj7$)HQxkt4r zZGzOQCFIyw{Y)EeaQE@-bi70n3xaS9sb-z3xHS^b+bi>T6xh(`G5soS%ADa^k_l>| z+|fWAmI5lMfrJ{wjpOG=_e>87=#h5cA1I)tpVVQa)`4v)qLX*?j1hVI(k>j8$&7IG zYGobP8D15LbpoAc0$PUDi9C;e=9AtE*9nli8s+$7sKX-pSX)de5<74kkT6o-KD(!% z{;c?k#_q6nXjKWwPN@c;=jF-glTkI-iP~M7x3DgFM1_q86i4i;@4S^Uy>T4K}zyswYrZOAmZMtE^(ZhOvuZI8GRL=R~H1H;CF``**lL5!Hg4c zJD^6|Ey+CT7%hJXX&xp92TsAx3>&7J27eD9aGb~vBD)n*shFqeaGH^wJH4z0z~7n) z=LT1c`)W#T;i#(xs(ERv6}I24)wP8bAM%2JFB3)UrxRIQZU(mDfB7Gu+BT^@FB2Ex zG@9hKBi+x2ns@XPWB|z=%Sx(6$oBcmB-Wx4-SjuJ0ObY?;c{TOz)fF z&PSIp5&A(#@tM#1iCCLi*+4s*I{bTO*w)E}{~M!v{PFYQe(D5WW^ZI3^Db2uw((i? 
zdo6|{so$(f!j|W%XoKXyK=dS|DYnNsdYQO4ipkUjC#+msm-0#`S$ZjqV8~pxbxwrT zapOjTFnr?1W8xRsrcM-BWAku0G2DKoz+?6^4tZ=|k71)FyP}GQesH_IGR%W4FvP_1QgHq| z8La>ehZc2f23@inH)4bzoC9)9%tt~j0l`>knt9^K$U*dvY8vLH%(rOqSE73lzL^|H z@`pW}g#u&{`3_%E--3mWhJT`tR&UlcaQ|kz;@WV!BR4;XdGE^tTc}2ScBpGn%crAi z9Z!FjqH>ohz-g{4$z0OOpZ-(5LQ^beMPH*t7J=!%hFqQ=EcYHav>Ntiup3M!i zq;QlnS(Dp1D~MUGF1{))qzgzK zbTKb*CT{l*p=nE<%hvg@TsS;(zkeGM+6QtGkbLBYsv4r}i|G5{+lS#T_9LkqO0COh ziJQuK6w@OxkDV}IJ&xv?M{3aQ7dA0Df_TB|2_#3wGFUJdMW|q$JG>+8RR*IOubr$v z8eSlZwIXoYVf9SZzaYU*a)f7fj{jC#>+I+BQ?$Ztz~wICesuv7_i=wtvuqSnb2qbY zXNS;XHUd^35jt3e*q^0umvDr=Nom=fO21l@2>zo?yo6+kvnYnJC|$&*^YN`?m`&fx z@Rui8r%j+jO+O5DPDzEfraznhs`3>PE>J>M?x+u_uOe@kOE2kgkWL%1)d+0H)lv{B zW9GDT52uDU$cVT;picye@7v9CZC`l&Ni9X7H~Ahq5U@t!2}kD8mksPG$@UthzxoGc z1Tki>5G*|V^+zdT0uxeOZ(dO_r5o>$YWy3x4zHpa2J1DsTo1Wm;tt+XV}d9J$;-U8#-fw-{^Wbu+|);ZD5_ zL6ZvBxCize;gh{qqTJ=7!dg0kXU2{oi*z<&kD^(}#+G@akGtukQ@sZfZT(C7U5!I) zmz7X0{tA4VKv{~{{7aAy$q)N|DJ5?&2BS{j>0XAbwhpvFR7%EE;1UiOYE??;>8G>z zDF{Ezr#d9+2pC`<3MfZr##*%w!5)u&An|T>a zm^b-B&4Gb`)ZS0Lt|P7jikU~;2KNk!B~f-!#X$@(|42Y>Bvyxyy`8ZKg;H+{Q{AgCmDBvHc_ylHC)`0=<_8G{ToEozxJ-=g zGebNfGn)ELA@;qx9DbYPi>bF^7EK;zDd*bcFs(h!Z|mO`Z$JM@Eno$_wVxBAx|eJF zrOt4G3JHLO$$!fH2osqQB_R|X@9*X>)R^jb*MEQY^UTBvUG()xMPP2}h3j9?3f;jf zn(w0OPP4%X0Tv7uFGc0k=QFDmj6_CZgLn~%6dvR4uU)_D z$axWF;oe9uL{*?^xj>qXyp#^%@WHEoKgUh*WfF>ab)&gs($WgL#K>G~F8&d;Dlh=c zcq=|bTK!tQ6fnEc4;n%C=mat=Kw)>aYuZMlP z9Z4i2QL~O8nDgBP-NJ6Fp!|JYnkY5kuJ!}l(hOgLwU+=A*!|Rl4K?V}<`$2;F~dSj z!%pmm{V11Cl)L)S`@*cJ;i7qerFH3FtEt{;ylE2H{C+=!+7I8DzNJUDNS6ll7ShZ3 z(w>0c>0yoSjn$21(-^pQP4_95PKHaPz6IV>UxWvE7%m zIF4E!A+vhfj!*$&w)Y#vaZKyn>n9=?&K5N4@4UZFDyB-je%mqT6C#ZIBP7PcMzHzd zR-{xg8BC%{CsJRe_wxFIIVLlpy)Ugkn_uFrrTX;GVy{Pqb;yvfYl(hZs~t7h3#s@d zAZ5juM8->>G67?-i=K5UTUwT0iR!ev-4vc%EfB|{cmlU~l(n5m-%=l?Go!yiKP+Da z`K;O(=Z5-LcI-uM)cMH3Lef6m!ckka>Z94F`liD2_x4l^Vyf0}5EofY4OHba ze~y^1wcG}_Iml#sgO$dVJXY)yhtK7!a~>Kach*qZ+)P8NB^lk8u3s@F8R^%d&MXZs znzJG>CmC$HLl*lG*C9m{W<#cl#kENMB-55)vH2Q_fcFubOpy1l)Y*`ZGZAJKuK>`h zcOaxJr>o!V01~1~=|dfKDP0j9@T#r?)s*I4k{<0)>bR<U1+W3=Xi&$OV!sTO;a3+1V%^*L}6P zPsV4Cuk7Clv`*gyTw&S5l9e@4KG^R>|Bq$>|H!%t#U8evdI=X5x;W>7LZuoE37ZV-6-VI57-JOP61UaY&x zJ@OjKkWK6A>VD|`cf2wo$kH^&#HJv+dNEvJ`gh{3=(HN^8UMio69%rrXGjUkVKXVV zXQm&0!y`5blG@y+NE0seFynzPi>>eaXEbZC0om0(Lu;P}MH zy+2mOBzM09o+YxMC2tcgqOs)a^$bw^7o;E7vvig z$5}4jB!YLmH>Yu<<*xMQw3>s~>As}@I$N&7j=e>Olx4N(u8X-EJD4*s-eR8dG zAtF2)VusvBCh4yviA7)qI@3~{CzO*&n&^l_f5Z|s=kn5}&=lRG(gI2Ae_=5(SUm0W z<>7JZ_u>6k=xuBq!O#6^1PNC8k}Ku1ow09BGxPcLZ5TGre_Q>axJ+<>VT%dWNxOe6)Ma?5zzHsA)KY+44Yn9Fs^P3w>tIOUcNn+yjWa~p@VLaD4%F8&Y3EYX`j?fd@ z62Kk8alFm@yR|AI^-b-u8Dqt-*JQIgN5RD@@ollwj^FjK^kcx9Zm zq8Or=CCY&2?Lv3?Fq$lZTxWR`S7iEbU@3X$%FG%Z_wSx2+AxMGVFhQXmx?=4PG4wc zfRY%YT;EbhNcENWI6ghkTmyD#kLaZ~NyD!iYHqj2bOlJiRbEgilayJE(E{RTw%TMY z%ZVAMh91aK=uKe3gV+#(kiLmc^&y}O(?I4l43h{Kh75@nAN%wl>Fb(|e|V-|r`tl!1~^k-{=f_F6Q2 z3)!VsM}GZQKLi`M1Ta7{7vK-7!b}S_Iq(zo#?n4+-pJ@~_J#VZ@O$Xc5APA2kdiux z?B(1*XloTwFGU}hnvfKoxCLwsb*GsieMWLO5l5SV|7R8#YGJpcjff- zWV@8h>DO1(#Ol8bXf1DT+zL!10FeM84q6xOILcc)i~a=BU(r6IVcud3fOTnb#Kh%< zZ89f;r}#_ASs(ENeaSN}K^ws$UpJ|^B7zfP&DQ0ESNyGw#*SVkh)sU|i-@~eAtp~v zN~p)_%aHt?TI@vIc+*2YM-Snb0%N5+y?;ObfF`oX!0GfZT39gu#=F^kw4$%EOIh?N ztRilnKT(jO`XYVz5dGd-1M@KTc}#w*YKNIc(MXZDoT4`gj)>F2tWoJB@q|c^flED}oIX@8S#TNI=G7l$$w@i*`!;Wljt? 
zkwGhpV5axe4Aon^1=43A(}83!0;B)r3wqdcsnP#;P?p;GfRKcWy-inzmoD0~dZ?90 zN{1F`6=hm6L#-gl`{WEFzuvvu25zLzyz3*rztzOLkFortm{$jWxOSDp|>^am<#xdm_7FGnLsyuX% z94U+(@lzQh65oUjVXXNycQk`w~D`o^61DuK)4*5?A6RnN%9L>zNVg#hEl&TM{6!&9li4KPk-mrpU ztOaO?6Xqf$YVn!j^j}g%p+lfRMDyhi@PBXXB;b`w3}Vx z;zYxd6I%ReA#@5cJUbXv%YEr7T|crBX|R0Zk+w!C^J4WY20XmzrQNH3*;=v_k2r(z zudkEK`o{I^SMR6SsZm|Okk4o)0v%C}I%QndXFS2p%*RAXh|*M0PTvwy33j6{L{0@g zF6bVWbON=Gz|_iN;|Q)#jxVx?PC(A9Uz-Ki!SupUHNPbXfZE=QEnR=az!ns@wuNhg zf8l!iFiDfv25)QA7^x_o9#V|3F_xoPY~1PlwflmosE$g%?hZ2#to;^O!=IC@#i>`|C}0%#4?hqC4j?72atA zskRT!&QH=QEDaUPU|TuouHD;DRlSR)Mzkeedx;LL%xWvql_d{GmPRp&T8A{5@*9=) zSK(}|5sTCO!9&7WEVd2#J#I&`&ixVysOAQ96j4}Wj|V?EF6_}Dqy7j!jzd>FhU{3+ z1sY!-wjO?6ymxDL6}gr|sZ9mjFbB&J@)2YrDS-dh56O$~Qu~q~pbayqutB-Lwl#fs zm*zUroz!~nSQ`m00^7^06tRQhhET| z$ERmu9ZBxqvHytOEeHlI%4^MV{HE82s zdJ3>UZ-!)Dcx#zNcU&~AwW^p*k*_JIiiR0GzNsH?g894iKOm)PK!`3jYNG)ct(V!)xaQ3 zYAU^>n{*2IUFWj)(VD?H=M~z;P`ixHxq(zLY#O05&98E1MuT=8>{Z7z*$ZlJFV2T) zOqQgC^Om;aS`|aa{{Oe%JTpD{fGRo zDa1G95VciREo-Z19?`YqVG2nw3fL&}F#^3#xzF@xwKY$J zSQzlV3^N{1KS<2}^wl~vGF;lf*Y+oLnke=3tBSW`G)rjUI9KruSgCmJ-z8@Lp8m?# zAN!d3g~(EAlgYftRFsb72aIC9__zZWL;_2XAz<)d9hceguRVI*kN5 z>y_ner=)I%A&zkg)$pfsaxwSyk$(H(_ww*S6Q&MIf4ulWq($Ml@H7^IO4I_O29qAl zJyBqs+|O~shSGxx%_{Bdu;~;Vt>R%^=R3&8<+3F!_NV~QLPiZZeDFqUvjtVwR(pS# z6YdE#nORh<6uKGtH-kD-18M>u_Cq5a9|T+kHe0EA=&}f^Hm+Z`1W~-bB*k*g#Mk_w z&Pkv!x#^hx^29{W2DdG3@8~I5qgjpP3yf=T@EOdp(tM>qzRb_7SqRpXKSEguXXe+^ zF*fzeE+91j_85GNegs;hfP%l+WadwWCai-%y~sy$Cm1p)&hL(jD=@PuwxO0EyU1zl zLpXl^&CPxd>d+ezT#bLf(W{M(kO%xBodB>bGt3x;c9RT_@Ab~7KiRM5?z}ch$jH}r zoxCU&Ji3W@wj`Q|NJ^jUI$*sNTcF)asiDMa1{5%p>VxJ2uinluDX~<-F`7H2eJwV& z{Y(vd2@Xe^1bW-+=-@@1cT(Xm7_yY)0bAkkE7Fo3{)o!?1H^i=R7y?f{aePm+OYnWvr`#^eI1LW0KUNuk(*+n+pAmZ27pz3irdf)^B4PyBKkBh;c0OtLuN zC(xq|1a?FK=y_P}wrBP8Mw`QGVjaANdPc zKi2)xL!W^?lb|Y*tsJq4KhOyut%xdejFf1I68%~c8Gyv&1b>3p=^aPF&MR4L7XF|+ zuilH;RP471JDB1o9+h9+I7M65&csyugfQMDpeVtdi&n-a=c5!f$@RB<$VkQMKl^jg z5oDf)1JHZ|>`IU8lf*UlhYXyM6i`K1x=HITdXne^@|V)(Lu4OkPFl{q0+;B4=rr)z z=RXtUaUwtvNqTXRAZol?54uA`-ZpihDO*)xf;R^bqpZDX(s?yrtP0ZMAX$=Sq%2j< zLmj%nF)JXSy@P~J25T596oN=PN2@LMUouB)Gy0LRi4>(yhIGtn@hTYWmyxMwhB1Y0 zX|1uW&)_v8DZEYfy){CEIB@um$B$-F7QK;}1-N7Jv?PUnqVJNjCqGTl#uQpwjWS00$D{dpZTWM44rPkN{ys#ME$&(zt zZaI%;r1w+O%K<2`a@}|m7{1&fJfazduBV#QnbaHejfQ``x_|Z0{*f?1X5r>3xF4rH zj1n~NeGwcU3Hkb0CGyWzHKRjjAPL5VZ%rI>s)iUTqPu>#jtA;u-}zoQYFTZYOB6EU zu(j*W;vt-6wujRb7!@3cp4Hon)f}8uT;Ha$*LwdvAyVx*G5>QfzM>WBt674Gy)UkuLsWLVpt9fPk40gkCjkCeKTv`r``@&?v>^b#^r81Tyq z{o&zZ=wn1hMh=?O4~X9DB(MU6?C>hLdy!i_lxmZ~P|Pf)yY<!<2V{^?d_t^5N0~qO|9GueKjOOl+E1O2B-UN8{G?yO&gz7ZV-U8(c`r^B zSHuWu!qpS3ci6@oAuvKgeodWVk(`Awvn4=gFB_APAC}_GIGAdVBxFl+i*9$N!Mnmz z;zrG1HL2w$C8-ffq}2KfL@yFJ>NmK+d#84;DM7IQ_-Hc&0X3J-P^O0+HLpx#3+s z6PlHNY_J`|CmT>TnX!e8ZiSovl^0*qNBP7{pSOCFWWzThEN_aJC|51MP(YQ`OXN}c z-h2qqeyJJrx|O3%J$gQ7(AUGSrPBGw2qLN@7{FVz2?~VHjSh#Mv>yJ3VrAUDekl*+ z9qrL1A4h*~nJgAHx2fCoJDVI_Z0V?-@Hsgz5Fs_{l2TYAIwg#Gf!ehNJi!)rWnKjq zCip;5k@Ria(2FQ;zYZYZD6YNSQNyBRCYN6;G)H+TH1jovc$V4uo*NMPdX>SAw zm9|GU$9d3-vCr5*Nhzw4$>E=-wauIg0z60g*K_0nx-0ea^s8Z;LM=iS!3G_@(1hWm zt(Z-*#ud<#AAY@aDdu-FO&SgB#77Ut4w~s>*l9o09lNZxo840~fT9aE{6#5Idd<+K zL<)KEc^+Ax6vn9TAfo4GGA|6IXh$`SxxN^Ev!E#f|JEQbG@sn)ALTu8>-5T|nuSzd z$c-{L0uhUbKgxidw7KMN2&@_aCMK&w1Z`#d6M~dF;#~$=azy%3)h~?O(w}cP)>aoX zl%h5OpP|~GmT1GdT(5Y?X;)~tn#r$g=bH}Ld1Z1tyFkj{|y`oXh1Jk zlEH0rKl4!^2*90Q8O+rIPc-*GC4iu07d$Wm$QIw$@mK9NKBj3Q9gJz z@(~?;eEsvr~WOOz zD-%a3+_Vh_)3;K<$jHW&_sfV9#wtN`gjhJ8zUdDN+`+4p@fXR{fv7kQ$0H__=ym9u zd>tHHI-0_O^l#Mj@f_7}#Edk{Jw^+gUYz47_r_MrG#KU$K|^(5TI9WwqtmCp37 z!NY6?h^>Y2maNvJTBn7WW9(eZx`pXm{vHTwL|c4}o;G<^qelROwv1q=V@3kF!v!(Q 
[... base85-encoded GIT binary patch data omitted ...]

literal 0
HcmV?d00001

diff --git a/models/ggml-vocab.bin b/models/ggml-vocab.bin
deleted file mode 100644
index 38f63493a97a7e85ef04a21697f7d2989156e5e4..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 432610
[... base85-encoded contents of the deleted 432610-byte models/ggml-vocab.bin omitted ...]

UNRp-xY4KC|$&YjpKp;a1qA_#E7Gz97+A{FwFkRMQTNVzP+Mkleq7e zUZmm-~r_{k@n?chLn_xmY#g z`W%d;>BTH&bIJ|G!Z0Y}ezOw5rJ^l9Fx`TrBa-17*72@LE{1l0u_|SCV+uyGo)Tcb z#2nLCg0K>!Y+hOWY~+3`b_r+DXfeQJJ(p;SkcHQOupnH*<_a%*k(Dn|p<(&KZuk=B zhl$ti+LcSxQtSOpK9UXk(OpYq7+zilnZA63<@S29o<&P{Vg&jKHuBvl#UM7PIz7xI zPndZhuwMMYDJj+!ePBBBp7+0s({(5HlvR#jNh_r+23{|)kh1o@O}%@T^$7zChfbm- z%}cG4vV=TJy2nz5AH2X?Da$(RHuWsk3bcjxD;UE>wW?+vp%bsPu28tla;3!YN@Hyl z#*q-_GUWstv;X4se9CI(!MIsfneoy?RZX#Y3Eq5D55mUP4L#+mS_Z~*m?rh1i^G+e z25x+5B*Lp%uoG7$IF3G{MAt2=vZmHx*eJ(v9_q6rT=hqyyGo9SlF+PqBwFdKys~J( zR^1o$TgvmIsE;=;H5-z6Q?8(vhL{s3!7!vwvl`v39PxTg*oiO{9ccGTD(hq#`OLU4 zN^3|}fL}MjV-l7PspslvIK5A{MeRNiD9I4Z3!fA{ncN*xUE;lfT-o`HF4l`Vz$3SA zeMR8D1jfGT@$7=xFbl8*cejRF0E~2a+YPIXJPI$zkJWAmOolZwq2ggMR)!_E|8zDg zG8GH_mMzALFwTwevkH-nH`maw#L!Rb>(+_feaBrbZgaFE=GMb&}0isj0MRBkbyxb|>cC zmf$4LaIzW;x+1!%w#5}cGwQeJIyGrU=;V_%Kf>xN;i?UrEmy+8=SgrfBmrg-Hu3bY}TNNjZg+W$btEIMEUaPb)bzY3U48IEE!?BaVlaJ!5O^w zlnQP|-)o6@xQ>C93Hy(9TRUR(Nb5v~?i|pK;VZF}cORBS$` z^dA+nQT3WZR6uF8sBHYekSup$l%<=c+evwp9JXS;7uvMeDlz>wY|_(FR*gThMpu~D zsG4`~F3TcW4XRUnRKrY9Hg{qSp8KGBvSq>H`=`N zXGe7sk{`BYqpB96vVru00V^n7hd;f_!a!OxfunFKdly5a14+ zR=F&u!bxRY+~R17tb1>UEvY-Ea$=H&t9?GEiiY>M^&-DjH2t9F`!OKIKmR>Nz6hx! zU05p`N_{q7g)K$wzqk<^64b9hj>1R1!7MS9{a#ekP^tM-ofUoHnH#iv8e?!$iw&6g zEp_L@Eb^W~owplo?u84j(_p1e{vWE|EIRJ1+W*|E*Xp(Oa=Z1Y$KTD(?NqoV(W4gq zhbzyOV8_*#6ZCK3Qj#sZpRTq+Qt0tAYLBt<3)M4*_5}HBigN6i=Hu8>+&7zv-1+ zndLEjMq6h$q`>GItq+t=i!(7k&JNyPN*8;0?N6SGA+T}jbw%YbT&P?1jOG^KpD;M< zD&+z+IAN-O(xa=cDSaHyqP0}W<%WohRG+==$7?U4L1=Mp49+C-XG6VyC>ZFC!*LZ1 zs88n2&PJbMJUwosXEoKxV0lTN)wr%;z@;+y*yXm&G|suLq9QZScTw_;b1DSI-B>Y> zERM>Ft?h#0(rum9&_hAPI+=5c6Q`$~ebPA|WI415;0O%Wii=0Km2?dmJEx&CDXz>s zdM+@&|DYA;G#*GT25~ONa{+$+q)HIFw~{4p4-=aFSX#v4eEnRYnbjieu2t1}*;QN1 z<|6tW7&)hr;Hk(J3D0SXqdjQ7-89G@WGNh7u)l}9J@$(dK3Mj6ui1G``VmA#=hgWu zuJ~4*k4Eq?;XcF0&#RFbi_*wuc@wLYM~0`7s&bsCZ}o(`2kwLlp(o>!dDhOyWYy7C z)Bi_8x@8?zh9AKzOqDkt<^)sn#rvQl&+9k|o8?eSKQA;uZM%iCEyM&8=2L-WM;A0t zE?q{Bw-7_{p4zz51v-&7nWa3G8TU%&OtCJi3o#$)O*3Ql1s(D*TX!FGt69^1h_coe zVj^oS+jw0)L`fMr{A68g@CZEJN~RG{TUE8^&%%ev3R4_>XjiBZml4c; z6*|j3)>5I+$>k}QkoK~Edn$C!cTYl^ImtS=VHPS=Xj_F`IosYSv`ldXD2Y2sE8eJH zAMqt+@fK1gs-#?_gBIh~jmFGB&t_l9k&eMi07QUFFL4WejtNlQ#}33g_& zq`}Fz?D&!p$6J1KDTWie>=mVp`imd6>XOzKhgztxXVzI(|7yZQXL<(P=d0oz{IPGOL#l*wG6yo;`^BiVFg^ zyg2J4x)7t|a$?sgU6f($RaCpc5{osb+UyRiXPIR*3s?2G3t-k%+~)%B<%Q_gzJ1oY zkQl}138U?)f^#(o3Fi;w1E$z@WAVEudyYx7xM^!=sAAlL)Q7i_vpm12%dwW~=mCE0j8T(8e!D zFWhrEE#uhwVsv&Mr$!Yem!P7m7o#UU!Q4k+{bEcFbdVcIWdlxsuc?NsLASCDDygh6 zl|e$uaduIw7ot>Bn?Ys<=TB)@N3?ALn{mG1rD%{Ffz3Xu5A;;HAc{-T-P`XxWaIiD zPjhV1$t)CvV?`f4*3n3=UWx%1?kmp~yy}ABwD{_!m@9P5AqXyM?wzP)ovD)aoBcJA zKnT|1Vhro;Hn;I8db(M4fd?NK&Lu5Ou4uPp-BxoeJgcptX$0yEHrj}RT)p}_D;m*_ zh}}A_G#*bGQ$t-k7}hQoRX~z#)rN)?B41ux^~UaAz$q-WXTorO?6rjLPI_BPFFLi^ z$-bPNL#L~k4S9esn3|b?(l{NT+B?U`M;hy>%ZmJPbWqnYIREiT6hrOZ5jIfiv- z=!U9H@rs+Ra#^FA;YLP zqYmJ3)q3i{3F9+#xLTNB!^t}-w(ndDV{h^AXpg=2thDG7(zI;xAp z=&bdWj)=U2H^nS1&72O&?=ZU>EJWMht;GA)u12R&cI7l5y&9b=&`vItJdKXaxRO_I zywxVJYV!RQVa-;qY6dvjk(pHm3PC(uqn;(v@C&t`t1%0gx$>35JwYpL`!qqnTot-z zp;>!^}-|J}+WWY;TUf&to|?`euQGiEl-wkMJZOf3t`L$d`p> zzf*1U%_9HYHwy%gU$K>}fEtfg-yFThz`y=x;f@;%{oVW^I2ubULEx*!{h9j0ZoN5f z6L}+dF-<379_IzzQ<_QQ1V}((2QgFznv`rCr9%Q@Z_x(8_9_$#M)R8n2^Gv@dy7Vk zVxNt_MF=4j2k4o6i*B`u7vqyrc}tAv2m%15+)LnFV%}DeJ**~Vzg}FOW=&Za*y1fU z;!%dJqraC?aPGY&rUj*WF4}=$UV<)DiQ?MZZCfP@q-$LxT(?I=DVzs0vKm!sh#fzg z8GCtUj;(5q&72DZyF%rPimN`9R1O(2W?{-;xdC7z0o-=mZ8o_Y6NbX&eL+2xGY;jJ_Cc3XG zyRlpbIYan*owLmZ2TJ^o845^jq^&G>Eci@W&W(Jjwxe?RD8L!zz0uPfZ?Tbk179ev ze8ooZ)eZ=&^|5dN*WL%qd_rls0xV6X31z%&Wz`5d 
zFK3Fr@VLB$I{kF^;F)XpT2=kJ`dX{$Uso=+dNxkHnptZo165`)v)e0QvrT1ySsu9e z>XbV)Z|nNIxC4#pR+hihPr`N%P~o^cDn`JaW$P+Ezs%n0WtDw?3-<5Li^^k(uvQ0| zi7@G$ZL04z7_ovJD)-OX*1fsdPX?QCJL{Zs|G_(|(;#5heSwFKCjmQhUtnhnCY1gS z1+B6BwAI~LO{~j8nRxV@v(kMr;2ENGjNg~9 zxb~WjDGTz$&y3=&fsb=ATS{*Y%z7Lq8&?f>C%vD@>Zj%)1Sa3ANsBO|9V=(vqO%Dq zZ;kbE*8!VWsl^j=$5hQyPD@x#O%`{;`N?h)^^9&dl;Mt9R^J+HC=~BQBf6Oo+_ulw zvYN;EdCt~VZDp3LLDdR>xkP-W_HM(@tLW$h|5^04`&Mlcd@REr(7teAx^)pW_ zUAB+fOJA~^YHtR0s9}P?G{X(OrT^!VOl;>=x>MF20YfB?3KF-Fy)7D!Z}-+P@-`s{ zMIx93Zxasu={EMZz$iz+0%I=T7ITa8dg*OCGfra0|F%HJuG{vB1U2{oD|S-F=9f=d zSxvGz6a=QCtQn4gX(jQwW>wW@0kNuU*^y$)1G=q_7O%G*6>;k(d%LFH1!*I1*8+F7Wq%pG zJ?0(#z%iu`?y=(SI^jH+5a<3$E2%!@CG5|HVg(F0gY_b-#M#-oR@erf!#gtgn+W#HP!4X zTi1YYBqXe=BeZ_EwN*w+q1H*jTnW|{DD(}P{+^1#h`LZaZ+&Gc@bN%hX7||U+hei2 zc`ha>pe1`#K7Qn|ZRsP#5VoyKgf6fhRU(QOURh9$w0Fc5pjTic?+|i0d;K*$`VLJS z9@~~}>>Yur+fYM!htTOnNGWUZLhE2XG@dn3f$i8tet>86Q{EwjxXJ&VmGyO}yxdk2 zNO=ERw&~qo@W`i$YBskFC0J7${uSgv{(l(l9+zBAK&hwuv7Wda$Mp?BZ?x z9Xg%ddzRCbErEi-oUyjj+yESD@6bwAP-nDOyPAak_+l4q9F;rq)z7{h;h-9*%w5N9 zGyh%mXkD3J#!u^3f(^>x85n@u%Jg@M?PS``Q|D`a*EaXU7$^%y;a-X=_Xoz0R%}}T zhW32*{#YZpIf?Lswfi-d;jqF_GZn=L?YuRzSvVKyJ*?)JOHUT8sZMzC-!(yWyFFj} z>l!IXjqB>`!_5?ztgV8D2htrpYn|-tVNSB{{i1eQ#UL*3-k!3FliIPqTDwdK2U+XJ z(oMFR0GEj(!KpdSPvYCS?Z{S6`Ej>w*v=1htS{J(h7vYlXl*oeX}jphMw&vDtC-km zGY~j~m((%+?@vz@I`uy;1Fh5ytnG6><8hVo(Vnykl_~z?l{Tp)(fh2d&jMmWK7;|GUkpr2HFoDgc)&^D!%m*D> zQ&HF^ASsLDAeXO}azfLqtsLG2&d=@SY#g3H#nsc)@q%-4SJv+3CoA1!g7*c4I|`DZ z8B6cx*W0H4E$R?Q!)B~Cx9N&-Zd=*xWgh3-3C!0!J=)23v7^Kk;GIGc+;A8#Bl=2j zTBDh_5H?n0?~E2u#_|x9xaZY(xN2})m(n|R^d0k?=$(PvC0xi(yi)^{4^*2J%z2z! za-ftGMlMaOWQ(|1gI=7@uk$l*$X$J>77sgy3rP*&s>|vL15Lqsq*c~XEeP!}YR%?( z@_4Mmrs}!z$y)XoEofU$*mt;ZxK@^dRo;GQJ`@(~yZ+ca%AG@c+0B;Sskl4b?!7ZG z1KHEhR*%k}agtz=P2q9HHdVja4-KcDqND6Dr>*S6QCIZc&X4fCvZE>yXZl?_yYx^R zd6%aAtm_oKE2jJ^mfd5!f3vg;r6GowtfV40piDjfE*(v$aC_qq?uo3jhxDY1ok0Jm z{4T9?Z5QC=}$v*`rE%);U29i*;#kW>U;wfw@<0%F1XyJBWA8E@$SbfdW{a5EeS z&4hdq=GeQ0X;){qb$y`gnk?Q$@c@dRgbx z;=J`UD>%O#b;^RR^nz_FO_ZwhHWau@_z>I5`lyB2wlZ*Oz-;ZPUuV9>%jZ3sVRyP= zBkzf++IPT4-=m)348>#RjHwj24UebdJvtbPdxrDflCm%f<9R-=57wab+*HnAyMrfe z@;yTK%Wk%^k~luMN;ZGQJ(5mm1&=jHbUtOdV2e}jJ=)J#{8&>OxgO8c?B}sXo*#%R zv)EBE(NrrQwPsDpGd!r)vqAJpxPfvcM23b0YiAE%@e1n*4xU|3TURw;jr;AB4dB$% z`k8h{=iohBk3_EJYU3?PQ-^tHgdjoNYLl3;r|7=Suy*FkGPmAmlrQvFE+az3`v}}q=)#gZA zzHDV>!H@O>ucE9&5kGob?H8xysH^&34O4MOK#SVNT+ZrhtGNC4+pQrGi@Xrw8fQ~3 z2(_jvxLxkrdtK# ztCRUUJu^_7br$$$LYd)BIm~kYCU#o_+fq3!0c|_Wxv|ntrtjhfV(*K|#cj(u8+o4) z#Zid5!|3~BG_PT=s0=nb$gSR|-GyHw9;ELJT<+ue^l&fZ@6(3Zchn}bSa>a))c;7r zt(-+(4OWz#!&-PcOS8$(T2;MpSr>bzmRYW*VRf}voZykiVsEI275wG@FLfTbrs~WG zgK;hE%(%mAu9$>}l#%yNe$;E|QMGgQJV}Aw(U=K((Xnn8+vC!>-ur|ME(wgLVS@Fu z-T{Si(pTs&T(C_+DfXN=VZ%%>L*s4bG@P8Z?F7EhB|kg*e-W_uK+G-81X(fifaaY| zj!g6cArAH)n$iG{c5zptM}sWv6l7(Y5{qv5KK# z27>z^|8cWzCLA~ZAW}tkey+hK1tUk9@X=sv+V%rNx2xGibeWyqKIQYu)^-bcm_r^F zBWr;iG?6e~TNBno>v2pOm|mk}v=(@A{o>MEkSKfb_Ou&ucw8k5cVRG*Ae|htuAm$FE_H);EZNN?!tjH zPG4Ms?#y>B%k70{zP=XId-H!;ORemNRcj{zHUhV8M`J9m$7|rFngz5id)M3JShjwC z`8yrTK<)W`Y@15spkmI3D&;{CxHq@dle;Rmt<t07l-gU4ggX z(f4bzI3t--*gNkJ>=o`txKNsm3s!o6%sfJU53e!){+Lo`Dr{sY1o@_$Y%;;Q8&B6r zDZf7!fGf#x;w9|;uxF-~QE&!>A=N*DP(jTwCf7!(>q~M^YpBZH8&NW~RSjX|i6v_a zZ14%z5^TktL#<~8+Fr1h+SoH|ZT$_=iEyuz9d?~7f4+1Dw?vrFo?5@CkkoyBA&YPs z)Ie?W3WJDNl_iwwhH7M)t0%{L7DI*YOnY3b9f2ueh;tL!2Xtd_vyXg0h&Ycvf;(&( z{Xkwc9#MHrDKA^MEO+3bxj?iBdN~6|_%1*-KV!UAG4+JJ5th+qS zE7^nnOE#T7V4s9%X4IlwywW%HfSw%r$C7j9isLJv{x~Z@?)Zt)vHzOW{Yy@fh~u?p=WZ=`k6k)Yk83H z*!kQdn+cD{@&Y{@s^GcHs10oi_RV-Q-xloP!{Lm^b`r+StegDW2Zf0U7crs_1*RCy 
zoSIAU$37?oF0bezTzoKadjtMJOCQWL(xsq>`}L`$sz3gv8TtHQ>^xHA#N<43KlKW9+W=;cg6N1;&< zg^*AWK~)Vj61wHrsBPxVID0+9Lk?Az+YT;4wkkQ|wu+v=9=T&jGlcE}E6P5kdBDmX z?az@9X}&HYT_AJxLxCE1ix?i>#y%7n^|KvqiVww9Y_wT#B^7kd+4>BpMr0#@#+H?J*Ac4-7UD#Na|z46G&#JvInA+D`uhYw+-1ZAg+ zkc$skBXjOR;=*Q97mED{tf@l96sv7b(4Ygt-PO8EO~JiuWvOKzkL|2H1GSLJOP8&i zv%?6Ts_117?kD@27+CXI=ne#kz;3*LJ|sRu2t}mQIm~L+8*M9_JcV-jwty>m>$2@+ z)-ldn`*2{zMZc#YrAPD$TQM}2Mn4>Ljf#zpDaq5ziVy2P+RNH0eOQ-m$;Lk{y&m^3 zBX7PFDu*xJCv7t8)H!d<0>x=-6;;1_gH3-}izN4T;nD*}X8$%EF& zQg^so=Bl6`fSujU|LaRvz)H64jw9BUF%P{7vmi4n<%+d6Zpy^CJAzeIZ?&#+aA=JL zn?>+m}Pj?TtrG2$9>~G7*hi;gO4$NxJnXyoCjkH^CePxQ1=){LE}og$IOWbg{2_F zuuz(_Nr5Qv67js11){*mD^prgvygzW4U~47tW{MC2Cma`wFmQ%g~jQD^}0${e8L;q zaSvFynhni43E6y5rvc}C$Bj3$9E;r6)my>?T1!cs5n4OPfO>RC$H~%>MIAEQK&VwEF0U+Z26GfNDZ@BQ|LL`mLQ{zY}*=@Kl#FO+s=cr(5Dc}eMI98 zXS+=4$VYToeV)T=^dp*dG+f7&bg#?BN3^y%>Np5XAJNdK4umi3@s9-HJqK+fE5GVh zHkoDo(Bnoae?;4%?oaAUw$A}}&Xski^?fYtX2Duj{XsGKwAGXjo^|~rl0)pegcr6N z37TjraCUZwMuacd?rN$>8=rx~&M_cZVX3X>mmlf1otwq}+7&58r&%tUKcy3|^_) zb$+GQ*JHV$`rgPwkduBCtg3n)<%nkX^W+L#xopN`A`qtswFG6JaUa30buDEn7Qk;< zk98KRiq=Ueb!dWarccmNFWYrrvUIzj6A`A{;q_;r!oeQd%;v9a;0+U!i{Q4@Tmc$z zgrST%_r%EYCxb6EVx9)-m}}O^16A!>yB*KdPO2crLu$RD%&MUv zh2Q@zb^Jkh2!1LzuvZeAdZP|G) z>*-y{xJ2U;6EFe@)UwR0Ib7?R-ouN!p|%xkZZ#|8h=S|AsjubBs5N5guCLHJd)(Hu zZ&RQxp&jyrF$?jCh92r9fH{%g1zwwR_To_7cN6qvs`5*WIxU9sxuGSVp($V zDdu&37rZl$TiMrRAhewsl^QIq&c|bc`U&RLigh#d{2VR!^5ZogAgrGs2(uvv^MfJ6 zbWPx=QAHZ6e{N-Q0hh^JDz}K{CrHY2uVy9h!u~b;MBoYIS)OkrpV0ha8spwO zMn55RAWVawJobse*~asn6h-D>;R7SJ^a-JNAO7AWD%iN-?D|cf_(WjXHP$8tq<9TW zx2*I&r+6!$h&}lF*W0uT7gsJ>HM`?x&6KaH%wc?`W%GB5Ij$P+18h~J*&jA^O?|lI z4qPkgV<}8x*HyzE|FyD1+==R4^kQCl3x zp^qkNYxJQ&A~Ze3+1Nud+b+u~lsxC`aTS^0Xw&StAQlG@s!ga&fi32wk_UVzJtWlM z71F@ULs}Q%s^vMGRyj1#teWL`oi3iTT7JzhV0~`&hjau>B6`^x4+V~iVzZj<@40io zH3e+uBA$F6(zJ#OY2452s&eB!)=JQjuAFS`EGdgrr`S3gUvb9@@7!*}e+@K7@1aC>_rW#wuU3mh73o>I{D(~!&?T23cKb_U$&gbGur|I8Oam0u0 zHT&B@YX;d^OU}TU z5bWcA?g;=k4X8e<4w`9{spx~1$;Y$+uzwjm_jgqQ%6{%;LsRZEpq|j&9daYneuA!M zuMf$EcvIC^2ui<}!`#oyDl525rS;#jZv6P*1aD_`X2zc=ovaRbyPK_}ga#!c030$r zu$0Dh1jVgv;|?Ca*+v9|QX8p6w3RtWcr$IQ0wlccWCQ;%k=D&V892EccIJqZHQt`1 zpN!>(OmEj;&DbY(EzNTT7s_?BO>sD^^hsd@juv6Mq4z#1l;ZosEfptJd6vhEO(viz zZmY6dT;hpY`D9>P4(+D(zdIkQ>i^>0A}gS#x{&*j?UUNHi?hyZU)LA3p&u@-Y$BvqZB@DYO6z1}9xkD)fA{XQp8j=Vl=anYfxpZ_ z!U#7Lo1YY)C=H67SQ}gKtvtzB860RSYn74Mx5RM(c zb4FX5V%I#Oeld9~rvX%9316Gmh z7K2Z62Tuzo5?`yTG+u$PA27^$9JScaFP%V zNBLN92}S!N%v>w`>WU#e$l3|p8514#i{}($>Sn*39ib_b)YT8ZzAA;r!5{~7%>&JE zCU97MxG}Y1ORLu{i2bFpC77;^w5@7$?iAK`^#2@FY!7SpA*gvW*+_nJ3%KU*=)-}G z%f7nC9u8#S$l3}eS3qa!VKu3!4{Vr+gjXP>#fz>0)|WQgDA{KZJREahV@74`v@VN* zGAr)zcJpCfc>Yr5km0}`X2%v5xt_LEG~P4Y%5a_lG+Z{0-w%Jc*{5Rcr(m>xYWFJP z_89$?c8gRR^Vj&O~mEDxLwA<@Xr?%||cRSrxtkE_ts z7k9CX`PZ`C|HFb@*Er@fWME4@Utm+Tv*#DQvz_dN2TML@U6sP~y!CdwvjSPx&q)b~ zM~MA$B<}n?!dGsp-o_JbB>`mLchTF}Qbuw2?Y1p|8=t4IwxgcLrEH%TDlVbZI`Zk5 z;G2*ZjppCyc@~a+IyTEcjl4dswc$+IIkr2bfl4FGz(7(!iorWTEoQ+qHx=eYe$A!$p8RS=ArR=N8bpgL=#F72IREQ{>f8 z3)K*7C?c_Rn+fO$7m<2e_85KZ0_tL6Bo;YfZZ2zQmv~0{8P`!c2uUmyx}S~}&3mVp zHTHq~zW#mrO*Y832A{P})hp^N2%(3cj-@OWC)?6jaen49=G5ESDR%(2BS@1sm^~6T za8lxHZRC-dCa%v=of~~5=96bDl*yPrIBmF?ibn#88=qgW(j(E=n+X*5NMM-=V5pwz zU!TXfgcbJ}vxcifCt1JA^%@*SF%!IbFuF~-u?Pcr4 z!t-Zq{}G+>Xzhi(caYhq6@+&)p`cQ3!z_v~++YH1<%AOP1veG>v40V=btk9cb+0x1 zOu!H8F4vM8`ONM@h9ZDK_>9o^73i7=Z0s|E0Hop%SfPrD--GtG(r2#g$2vXY=6>U< z>=vS|$BEAbO5Od{q~M@g0CAm#h~>g5Sow^u87w4JhKv8*R#n`;Q3OkPTX~4tcxdgY zZ$37bqc-nQ-o0DdZRc>ZN0f2PEw|W?GUj>a zF)`NG#7yz#9%*aViKXrKHKXl78bpqbwRQD#7`qQiG7Hs1>F@z7sbFz?#m3v(Wsp4F 
zIBi`fGP31(o6N>s`MBKHF@2u9IuwPo<^_%qn-;_a?nO3%ABP&qDlCI@{=TiZ1A88U zj2<=g73|9gZM7Y13C~MS0SHI=2@Ce#dNv0O6CObhe2QBZNNsg990PU)6`#qD{jQ)M zKW4qGk5mttp8h_Jib;;pb+0$G2m*EVgEM1_Q+O-qW!Zh9`%c?dztE+&9f7+2ZnH;Y z`mf;0b>vaa)t&`R(0Rs;KB|FFt@yyl9u*3g7ID+3G_JLw&K}ivI&b5T3M;RHQnLva z@put7nZ=iI=vq!NcL1`I>0?}I(~kxg!@FYiwyHmhv}r*DHA};jv3jNtV^r73X2VW< z^-&RA~v_mGRGOvxH5B zYxYbiZ`N;rwG%tJEZ|%WZf8|dp%qA~=`4gdE~_RmxHmFWt7Y#_!CA`=@XqFNQ>8^0 z|69-M7*8+56jHj}#E7xxVB-gFXbxliGhG+|@KdP;>I~Q;_Vdy9a zqb|a=X0&Q?R%i$v=2kUx0+=ZTbbF$w9HQ#=amlopTmw@r6KWn)jE!pvhqO&f~(w z#=j7&0OcW&rgXt^Y?DfZ=OruW|969&O16ZrU+2tCD+`huyeWMVfwR@rXraKg^)JL` zJ}_+!mBDi~ghBQaf#9MwcPm_W1K%}OAb9I~7FzO`aVv-NRlk6({e@Vxs4`h6YriHG z$huh!^m5tv)Z`&79<8r*aRv^~AR+UvJoEsJlvx)ZjBA*+;Znob$wE%5ZYu+7huS-8 zbXPkb{e;JK%rVfB$28|*UoiTZFtd*n0@lG}LL<^>_hVQ*7IS;>sFhTG(dDO)=|YJ2 zwOmPxrn@k92xFUJ$mmdpE;c15#Pd4=$gq9q8*&)J%Rv*(0DJVUYQd7x;-0@6c zJ^Ksm4QfFEizyb}YJ$K*x~aeK+;ilBtz|o~(9k|wS0UD+wN&Vm@4&Xwi|&oKqY8-R zVU=4~gC=lMFUJyG@Qc>ZPfjU!L^}|KYj1TOpvR=|;OA`UyTY&<3eGIbbSlZa9W9bf z?{&UT0(2cymo0orsBo*l%Qo^Qp#qi6SytibmtrDeksv?)Qs8pqlbmp03S7>FaIK_| z;*zW6jjICTLXh?(EM8r{izk)E9q_c3ck6g)l**R^-?OM91Av2F6`$AfkAi+X-;YQG;412OV?lSB4*I;*4U2LUHl5F zG`0fYn6~{T9gJc@I){8GC;rj1W?$CA#tn~+d^wQd$`SAp^g&22-%nYJm1V`3g;2M> zgsW1@a_$h^#=k7oUqVg-|KQ6y-Os`+RkGMuwz4X4<{`AJ?6%^5O{TvbXq#WLs@j7& zh>%-VH=crh@MVo#4yI!!8!AamLXJC?oQL#lW+zdnaF3&F%E3{bt!D)!oYqozbC0*P zFL+2KEXS8MX|6qheg@ZUH!Ggx=;5_jH3s}uvUUYU*6wkZS|md zZPhxe;(}o7W(W2`A@@3gxTvlpr=J5l2Db(W(OLEg%VM(=3+@#q8|LinbN5kO`oQ)D z8hbl?z^U!>D)c4gQrF@bXGMcmpF}Q9dJt_-AONUkU8Q4F(pPks`J2JUzoK(S!N9=QSE8G*yTK+ihw~W?L8J}>5eH>O9l^PLETieK!~&^9 zGF3Cjb*T|&*RtI*tEH}X_i>e-w1%n_C)i5aJrp`#KaaPkHMLS4cV^?dT5$)5 ztl3tA_Hf_rEVj=b2zIhFE;ECF0p3p_)YI>tj#FSnHsbfl>LxG`q zA`lmGI(WM33xTEk^BK=CkMDBcCh`kKG`=REh)D~v8qY~9t0k5>l)@9*t|&VZU=$mH z>Fg8dKBt@-FZRw@P05Ytz|IqK!f+1n+H5uwq6<=2)poLSnh6fF6emA|ap!^Kwyxwp zcSG_-EG*cp?I&Vk!Be^5t_nL@cdpJE-Bm~3r;uE#^i;T*Ti~2UDIvQM6rDPVJ=%e^ zFWJmoyz|>o=@&n`@tAGp{~i)}TeVyZE5(O9IgQK(Ys$VF*n%8zs_v_Siwr6_s;osE z@N&c{3&gB&{1smfR2Hwj$x2_<3Wqwi@vp|94}hz$#-PK2GlQN~`F)fr3#!{lpbqy{ zAu0mY%=3ae#XocUH)nj7qdHS+&D&O25Uq-(ebJ=s))v*Ct+ zO(k7D&eql2qqEjh>Ns4WuZlo$l@Mp}gRksw&?VjMo~xm|w5pd4Bj~lhx{9|-=d%p- z1<%v3lg%6k*EDC#P#|4j7m6X_58jWIXVB#wG z$jh;}p49QTHyj*|J*jC;w~-urPijxPuXHQr$I=QU<@l3A-Zx$tUwf znp+CT;N>TU@4=U?k`>NF_D^TpsrahWcszp1kS**FFUtBj9y_{`r54~Mu4W_fNzj8u zu&RQ`+)0$JC1_kc*t%N3$a}x_WUMQkIYunbwyMAvW$kp7=h}hJKowCm4LXND`tR8@TLiq*0Rdavv!6=6+5->Jw+ z+(bVW&m7p_?+K+y=b|<>9D-Qcnvxrz14*iXz(Y2+)IVn%I6Ekt8GIaKxsxALzAd^r z4*b1B-S-6S01D#$-Olq&w}C!hGk=oT+U8T*p!;f=A`tKr9I0Wa@CHPfIs7Y*DUz z@ECtu+ccG=xD_X!)-rHk#et`%^Hxe;wUyO&aU}d9Rk977rEm_ahZi=UuxkEabIa_S z{`Whro_(!j+1hwoD+wJAe}b<*9V=o#oQKS)@%*FunyPaX*t(KS2m)GJC@sO;*{$Oa z%sOi3+Q)bZcAwsT^tv8TJW8#Xm5H3Ua7 zsLaS3nR5(wFG|~8Euz7iYx}4ezP@j>wannShW5@nN5iDMrJ?URiJNvnwAJXXH(5t@ zoD~lPJOLbb&H0{2|F5^)Vtpmg)CsI5uv}@bZ@HNjDobcH54&2YZl;g)+_u!n)mPYd z_JzkX=j%>>$pYEF7UP6aLDfg;J$nxyu+gt+l=$$$80>4Be%!=ztQ0C1-2Bql0*M~O zcl>M7J{u4pOn5(>P^Dn-f*XCs_4hR^t89TQ{O~DI$<_vI@VTZ1a|L!rRmtMTk6SI1 z)6-U02NoB2lQh2e|GQRM-K$^I{`7FHVRh70&4sx$ww8HP*=CZ~b7at$JStjW(+YL* z3+t?{cDU=YQdXMd4s{=;nG6IKStb3C_mws4YP^FSq-bKOakZ8Fv7t)VkTBU+4j-F9 zyv}8pc%9IP9TkJh#Yp8Dt;s$3WEy!!__@<%siV&XCBgf2mltbe&jeyNp5sEwYNzN^ z=^0@Xv~mrPClKffo=TaO`g55TT zV>R`m*i*N9g5xfEt2I>O_QSTSRPkTdRM`lbx!aMs3s}Fb3$$z0SZg`AQ+EL`n&XhnLQgb8``GN+sLy*5B68Qn4S$>?B%2zdp0JAMO-`^$lSl@_yH@a zfII0MSMt`IZ?y?Q}--3^To-5f&QoS$>9xZ8U+R@44ncj0}_!JIog69lK#SX!Iv;l7Fuv&h0l+j>@{ z1U#+qHjX|+0{3QZM<1+rP78ZZI|wnKjXW2qxPaxPQl(pL?76_yX^#T`oOa4V9DPG9 zJeP<5`?*iX^MgkeN+t<%0Wt05bDCbH7OW`RD`(~r4wuSqnj2+0fiLsqWyc5}d~-N_ 
zHFXB|T0JXF!QE>-r?tAThUmg8 z@?Oe1dtpk9Kd%A1Ns2qpnaDiXNSn;`Mcyst=L6Tpt+!g``M@>Q;|px{>CE^a|7lgV zUqJJNndf7{oQ27d-Ew)iHM028W44;fGDhQBlamH|P0hMnU|Uxj4j}|~b_Z7*)=rp2 z#pDg7j3fTO?ItAHHSe=t)^<(1zFIqp1bd*rXPlti)ZaD2@C=_9iJM0Ha70|EY%2jS z*I8KGnpDJ1_?^Pf+4EY#5ko5sBVW+MKfq+#$O|z~D7jzYoEd#VYm(ItH^atW$bIO+ z7o8Dd#S1ZEd-hlo)sV}?^`^eYS zKANt6$O-5%R}pS0ec6@I+BvWYw%&OmR+O_lyZQgw1#BIAIWx54&h?$V7Kj38fHufJ z%=zVNn^_UDB8FYrTV8W6t=rm#<2IekHQ;Ou4ulA{$652ZuQ>a<=J@0aGsXoy^7ZK2 z#v|NDU(Z7-!Uizxu`CN~76K1?;p>{~V;oeauLlb6L0ee=a>}2z2|+9Ff`6BV=eR-3 zO0$lT9erKv#$Be{^w)Ja`lIS2R!vCrz^{E>Yv>FL@{F*qEFzkqVy`rsA@jB>Xt*-~ z|IGv)UM<$tl|`2Fx+>J>xU*VHL*$^jt@19_gh)^Z`dYBEFt(->~AK|8ITXmcmhmI5UV(|o~m1ez|kaT`A z(u)blz7zGJUd)>!n2zbA*z2;^qNl04_Fg|zt<(z)b7JwFg6A}zWzmg*vml6t>t9c1 zJ?Ci4Vs*uH(*s`)n_{XtSf!}F=tdpb37y57*=h#FB3Qg3jv zQO6T5@}u2WT=orfRpY&@&O;5NJAt1h6o<7EiaQHzXG6HRvW}_{1;8~5yIBRTsf*TA zW3J4{kRGtU`gG-6Z?}Px8;_#Gl>p>jbeNz|u;{ijXZga)DPni^bZ+66c=u+?F=EGQ z-w4#qcwmK*Z)i`|SC%3K;^;TD#1FbL`PesdUf`;JLx{qf&acu^ey*V5#pyBrjX*v6 zRnE$v&=*08-U(M_Qmye=M&mA9^N?gdg_UpU)RQ(7)GQH+vU6@_Sp7zzZWd=~*&e6O z3Ffz+9oxWDkaFl%cm-Wm+eL(#x2C>$>^Ku^O`zcuqYe2D9T@2umS+Rd z?3b^wI6J$2U@>wJb@LNnetskOa^mWX9Dn`YiL2WnhwQ$4RyYkdRSp-ltbk$m*5#ns zi>rUIJ`pV`=cqgY?tDY&3^i4@E4=Y<25LB>ZRDFV`Pi_#ONh~L=8n2I^qt1?(?v(b z4JlL&28oqat>DaMJB)u*^WiFN5xsWen_3ty!drrHoBXC0#xmy1R#sgO*9*KkD&LIt z!~j8`(#3I>Pc>n}VseoIK5GP%XOAX4onDvZSg6Ia>@6c(WK=BO|FzU zD;t5Y;B`%BH-ga(r(9Xr-o;9)snNr*QP{fmgyK3tVaL$OKGhMyz}Cxb-^nN`At>C? z*5)|FQZaNj)&#t$uxQuSF2&b zEMqFisLBG30udh-f!=tSH?XQTQ0+xDQCIVS zPJ&C;%#Y(9WM)lp;LV({bq$g*ziy-364ZZtxwTb+Wx~1K$=cW#va!^waFguO8hV0O z+zpodDmc$U0c%2iImU^xnXPbH5rk-%HKvaowyjKKonhNqYIY7<-*!~}Jn9AZqE`IO z$%_b*!OV*>{dY56qc6tva~NWZ^rChRv&8{cD5r2oh$U4hUVPZbGkJ8%CNg;sUI&## z`e5Z3wFb&gHB|IJR*W{C@Rk`*HCuuq!&<6k&KYz)>#BgX-x>)hT#K(}TmSwlYv%vB zvg+8H>Mn#w)r1pq?=$To9NJ1B^>pV&Eu9m3LwD5?f22C~w>wYl6&IBDU(_MvyN=Tu z_h~Qgy0YLs;x=007id#JFkW)4zG1fPrm+js)b4d42rDf#w&9;acN}1 zvva8Vt$tf83SFZs2r#~_TRDU+JT_q>2@rv?FuF`YCm;ws(o{>s+jxp+v5EywI{JI= z0#QGdoOe}mHAFZRH;L?L)#tdMG}QlknQi7AU;ylbykUMaTTrLAmEFd)ix-vZUvWje z1pGY2!u!m=qr>Vvtb>v7=nT8*CL7JakGLT4I~pVemb+*GTJfEj?K{zJ{f-cuhrW#` zh#OvK6A6Ml$R@uNvv!Io-B6rm6-27Z;GY)aAwTv*DOq20@$y@2pkX=3Z1Z}roF6h&6LZd1*~$qz%7vq(%UE_)0`rnt zV~<9xk6Uhem5nIv3i~$N*UCG{%N?zuzAo2Xq%QL*vLfb-R#F;gUT}{3TJ=*dgPh1h zQ;0MtGd;_g%1U2yoz+S|(0kypP4{Edc^KZ{N2m+w`n}fIx^O0oJ7z?)OT`NhFamwq z=)XAXj#~cN$4e0kZ#72}mz_1$@_)Xx`jD;lV>ulphE6oNo+arzPmWe!XTtUDc3%tj zKVM~?9OPcN5$x_Z;JPB#%WCX{Faou{T3lMe8T}yZU2-2}o2m!3*L2Ove)ySD$n1Z{ zY}`@}IW*fAsEtSQD!HTD2hh=h47TrTXSh<1>q3rvH_+-Qg^hk!1L4^RPYLR7?7JFB zscOY{1I;*(aD&3qcXjGXIl#tMZiYQEp>ix4CkQ76y?AN0W#!B{zu{ghE4!d?vS|T> zCgf>#*{TU!(hjQ$#Kt3ltAAIhO~VxQ;u_zLMdU{KJd4$T1UBiErJ0~Iy!e_5*K01t zSx&KX+7h22hO@&CqWU|W1osDkYu(u0xjX~Tenj*aHWUOXb& z*!KcY?r^U7o^Z%}^fViQ$jQosA#LNT!wZ@NVj=-B8ytixRG@=ZR;w6Ru#{L76#-0@ zZ7!&3RYd=viq)+6s#jX=d$A@Wh#rHlC;YQ6j&FP~hF|9>{9dfLSw9(?N?-9kx+Xv? 
zpm9BGf+b$RDsye!@sb&Ct6`n|7<`PJu1bZw_ulukQr{tsJ>?X4Lkthp#sXn}HdT8j zUVp<~P%j|$+S127cM^|Xjj14luFGt8^3wxM`cwD&F{!*@H+!Temn-={29;B$MEY|~cJ@{IdJS6dVT@Z`C|I-@kVR6P8U#t4xt^Awc-z9c9 zI~Le&Yl04$0*^Mqxwa~+qLl;la1U*zS$#-yHAncD?F#UnP$Ru8yw^Dr*^hnTC)?xv zAxt|!H?!QnsVN(3*agnw8O&C8*+aA0wq_qLqr#K!BsQ4X5UAP@v~h4{3sd3;y2spd zX!Hk~=ODyR#r>5i<)A7-J9(VoX;osQ0)msds*<7gQu{$Hja`;U{RgqoZobVL*}savAXf#i zc#V4jv1Sg;qXDdC&NAFAjygfZjSfZ_2fC&NCq#IQZm2m|9h#bN#wk<$MT)liJ0l5jaFLV2$w!~>g zhadT2jC&upGD=;&&BlHh_&ehIFopj8SG+u_;x03IJmH+1;mWSr#18{o=ySp^waLu9 zW0Ch4x|KiF-sLG6Ix3YP29}pNwx-pV8wXoef8(9rYHD^5`d|7JxYkIhvv4_C606E7 zV5c_)1@jTCFR~QcOYU2JT^+-)&>u9dgodI~JL{a~t=drs6$2X8u&(m~be$shBVmZp zgD#OD$z!}k{{i-OdojG?f#JZ_uv&$Ls?MI?I<8}F5?5=87KM+1MYWd}Mm%OCZgJPCF z)X1&@7q=Ukfh5qHnI_<|t!1I*a~qG?x-x1TPjTvHAtx)_Khj)$lsaMbiQ?wkM920x1BH_I`(nI$09pw{)j8wzaYta11(ExM1*?d--O z==2?>&$<_T`*BP)G8$A^ejIR%!Iy0G$I%lvy^bq>?8hcg+KELQ z|FOpRPm*&&aQyrVe@>HGLT0Bf{4A?PQA!+nt)-0Jw^_}nak&4ML_0dm)U*~!Wr%hTyY-i1dGleYyJ9Y;*#CWY*^RuTZY;B zGWXn;`g=RG&KpY^?#|kF)IdIC4c5p1&g;baXfS36!3MdTaMJrEM{o}2sEXiaNf+U3Du4nqc=;L8GNXEC82C+xQ?Bz{XPq@EhpJTMn5CEcELZH>5q?&!a29T7;fj0c+R=v$ zX6z?1TNSht!nxVVPjpfq16MzZS#U)J_TAV|0*i1x;A*JQIai1IiT0e+Fs$Wqm72l( zy-lb-mzalCo75MQ)O&felz$>L$1|{^ELUt`%BBUPn1V#8s-dH|-)Xg<2w!Yl#m}uP z5B(2oD0wR<+iEt-qZk#3rkZg-fo!gy#Dc57!qyX(Uzsl0wUz*!rPuABi21sA9?ggA zvVe{N0hBG?Rp}X6G(G)4k9G+6Sb|;PnhRHmgB(hoC>xuqR16-pq2?S-Geicb&_P!* zyby3WDt&F86}h9al;_+M*X*Y{!k&SQ|7qaDtr2YWr&_CEk5xUEIc_IXD9x2c42+*@ zQ@|HG?=P0|p9b29o*ReOL{?jNS9wa8kHhybC-7s79xlA{)7W^as_6O^(?1m&T|LjL zKaKgVOj#}eUM7l`)di1RAGU46I|Q}KYb%QlKE`01KNWu6Tp}VluPOKVy!#Gb*Wiz{ zmRssND>nF(Z55lpu<;1*VihZ@w^>)k3f7DBfP0z%r0EJq)7MyvI(jlZ@EF}7=U^}L zflUF0{9zu4`k2AF1#hpPhQb*lb>u+1BA;ym^W)Yfx?G$)8mOx}z%jC)1&&zx?(AaZ zXEAAD-mf!#5YrnT#n{hsjW3?M;0n-I{7i^;22bf{xzm=G+_ruEXR#bEKnG7`DG#eW zDR|QMD`&=GCcC2aj5}MJ&i{$NW7VGtYj;^qy)5>egN0;ul{xOt1sd7O!9#4_Rej78 z%!ad2o!WB}Q9eh@T7p1Y$}zG2Gc7tE7jPdj#mNy!8_U`W71H8)>*(WbEXQv4Zy6)0 zp87Xcw!TJ8aBv$a37gF}m4A`on)-QLc5v!E&V0;%9+=<~ zfh-yMc}&E;qd|7G!u7qGQ!<5x7h7;B$nlix+E!)~2ge;8Z|CPRG_jFN?j)F3 zqZDGl2y~owS0BI7dBdUIwb5S$!d{Ev%`Y@}ICOw&<>dQCo{0;muyHD7F2SqOGhpWk z2;>dBY9b->WaruS3t?NHJ+1r;5pGP4Jfy>M`bFRY@)W}27jaYk)4X!;<=fGL&ep&U zCj>e&s)0NjdS2lZpQ z;yL6ZlD4YHP2b6~VYrEAle*>x7>Md?*2`(4F3Yr^h1oMs^$dO?D$ApB$W!X(?mQAU z!%Z27IRZ3MAPR+~;wTO)6D%h*_G>#?>Yy7O*e}&v>^3^VWf}Qp;P5#7_g`v-A7NvR zDeYW6D}Jf7^FDu|TPaJqym|bWdAzs-Io?|nztp)JZ|TWj3U|m)kuyeqIqMR9&MH}# zr_P+KdrvEGhGWdCYLRG++`Cp&&e;pB;JVV`k)e^LmWbN2Xsf@}$~g&X#?&+g-cJI* zEp1KXW^p=^$bMY^WvuDq_5;>RK+70{qh+B%NTAxuUgE7kNcpa6L8@CXv+C~040QDc zMGmEHpj_W3O!JernL|I%c{t2za?hK#l^M{Tw*5=JTzCh=MR?Y&v@_Rzb=#&;N2HVa zb7#2AM>b;!8&AOT-PBsyHElyjZBrZ7eb!i^lFkJzZ3dPX{T3R}T8Q&gRyCo5#p_;f zlbgE-FE+23(W@S72m(K|JYJ$-W~#I>Gi%k&SWPwDBI(Z~{vb=B7oiiCKef+Rb2Jdl z>|cUXH?!2nCv0s~r-xr#U@A*O33!}WnDQz~=o4#elq*~Z@DKI>A*{N)IeKi>pyoBq zxb*Zy>}NI+(GZzrYe&Kr?`D4akprr7r0y$}Rkfu_aM6ZsD;*vOcd}R<30<>a#oR-N zP;TT`+E#d>u+d*>PNz5zSTbY33Iwij%@vt*9`9(ZgpwfOLIR3s{8urlhxXcp{=OT5 zEjFaT(t6On;r>FhCEkfvQ5xNML?#Jr@GuLpnuVMhUHet+Uunp}ajmOAXF2j4zX}u} zC7Q#f!Kw-dgUb(rrXbF^DW0wEf^PwST_p>*x^y=cEj1Tra_t=B+$j(Xsi?%Bz{>Yu1U@rn2VGAT!C%&EkK;*0ai<6V}RxIL2N1psixgHHYK7+X=V4Ldu^Y zaB~gEH$iONYkl?HsclLPG*}3dP5sZxTVah4RU26~oRBSL6|g&f2PLfiGq$57EG4tw z#KilXz(#(fF=D7Os=x1YhR@h<@@i>7w97?p@f#f(GnXPxwCb&}`Nw~wUE)06cm_{o zNsk}Jqc}mqt3X)8rhu2B0{hs9P>Yr9&Utt~)5^iqr@twxztQct2cd);>ejNJ3?}No z5o2OErmS<=8^4KlbOQc-*5xGUjfR{}*TKI^m+(l!NpYV@d z>%Sxz;a3{0sbX<@k^W8yV1W|iq>?qRnsU|;AFwFfW>cX-KK)yvW^a0Ht^PLF6e=Ak z+x|8b7_g7^AcWVI*y}k4f>pp_Yp4P?tLWUSA?M@=s`NReon;|)I@BM@M+C9mA-QY z-HxF;1t}7*z^$Ace<9M7?QDoc1(fe3!1D3q6|>*v?L@@nk>6>nu&H=yGx4Lp3ru=c 
zw6QF=A3~(aGjf-ib_~=8B#CT`c^W87u!TR>ZM5-YWXQs0^u$Q$U#(L_)@*fbn~s#D_e7G(HnxLe*65&2P4P!b4bbp$6@j^)9d=R)4Qk&Ap>o zQ={5=hF7x6xNOMQv;2PCUbK`Z@+30j{Qm^HqaDGCm$P-%o-@gvQPj&`^N3&?`@h$D zEJu`_nw&@jb+vNXHh25;K@d622KKTAwv@j1QO?0_rB}|;@J@~myL6YP*&j5s#F67t z8TrF5H4e>6Tv*;#f6#cD3bfSnW3NZ)EdHQL$IlMrl$HLV+wZ~^(DjEvz|~u9BEd)C zYn$BtT3ngsy846ANMJMWH#pf9frs?N=k;{9g}&dMRkI?zGj6t;T6W^w-S*Vg@`^{0 zX{eUmjjkq$ifafrRjN1)!({Cbu^Q`G|E>Q)n>T#)f}@sNqC-wJwEvKE52fJz0e?kz zvfIuMyX-oE-QAwLRH~xVR4D%S`N#xYe=r9KP;(c6TL!Ff#=B5}X!jVJ6 zhtikd^;8+pa%?l3P);R|RGUoTej&QPM|r4avDb;CN`9b33*H2y;v2P^@b*LW*M`Ej zn6%7vUHvN-&;d|61mjTb3CuBIO?~V|85Hs&q1CY=URUoB7Wj*%HPlk{z|g#?+nSSG zZ?sN!J2VvNc2{lU6wP|s4zca#tuN4=pr}wQ4QKD1ZDx~)xIBhA05|Wz(2654|8Tw0 zu&~+A*05W`_7OCxHcuY**&j9S2ZP5u^2fl<#>1c~Gn`MuZS}|9J&~;JLM3l{{mZSC zC1>zRi6e_Y2IBAHZKWco>F=bH1#k$j=1_CW9e`rzsDMX)HJ&a8#?lzYmiv3Fl{?37T%@pr98&~HKKHJjD0iul8#24@Z;CyBu0>9 zF9i~d<43J{DX=TKu$7cCPn?bMEQ1T(iI+63c%wpb@Fh*98{7GIQn8bM2xc+j>kwb{ zr9kp)Utv|1K(pFv*{fMd?z$TG+X(INhU&`rY*lqHG_08w4l*Nas(_Dom#MA`>J0HB zS}$o8@SfmO#R5MI-VTp-#yWz0G{RSR1rMD87=hUe>|CX;{)Y{6aOhAueX^O&AAqGd zRG~e0+gAQttyXROC9QfahZF~CCnv!Lh-QC^tJZ(QDTWe$?_}S1stJzDB9AQoMK)SaW zHD(^32f3aEE&R#01nn3m{B4zSIveg$g65WEw!5k^nLgRI0U zi;lx)e(i(8$GuEcEEnqx{?VU>+2hb!YroL#sksm{<1(rMY+d{is}b5nH4>qpizOdYW4w_tISg^zYf5F#k^Zxz54RkStEPk zT(1kKZ1vAN16)7A6+3Ywmo*pA%7d=Z7bhZ$kGB5j-3?FYTY{wEZjgfN3ltu&sq6h} zST|c>pNEkNj>_^MXI7xYNt^xb6prED1HeGt#lwwlDresc#y?OhL*>k2k+qd$x_Xao ztGV#qwUeMigY9c(e~HO=e_J;4mq6Iu8`)uh2~>)&02TNbjqB{jQ=m_I1V19=3iQNZ z0=K|>mP2+tOTxh5F|4ct79fM3%zBXQ9<8+em)NOOcUwj6`cj_$i+0*Rs<8XPT2|t= za4B`|FFLYbh9yJ*g+JELlsl zfpISPRMzm7<}{!>$<=CIb<&pvZ$001Scx7lBXFZTmG@>fj;1S0#JXO{BZGTp{94;SW$ z$ROSSDhv)j#iOD0SIzR`Yz%;Hr$7FA(M|j{kh<|q1nHj4%4pf3@suC_QnHGgTy#Qw zT6J*x>_KO%f0eHlW{JUby*LA+mURi~!cm#!?_!ywU8pZ#^N^9NSuld`HM8Jhs4lpK ze~ndoVwo5bfBj#1Yi?%{U;9BHteK(U-X0HxE!8RRn6hmp|LdNT?c_AOE@ViA>~Dd*H}JEO zzr_yYq;g#_8`Wn);FPn><(NwH$tyXtF1ht9$K~I2mO6pUyX$W;8}1Em;%~8`2>3nu zH_gjy{>i^73m|SSIV6?FGu>I6{+pJ8q)%(|=aaPWhv!@wZq{Sa2~j zs|n;t_*bFJ%>;t@*RQm-toUjaxz^Rom)~kFf$+cpsHkZi#idKuQE4_I3__*hl@PKu zM>Pn45vWHH!bo!glhaLY2Ks{B!*RN8CU`VI&e~9LAh;po)zo<&zuSL{9VEpIaKEGP zf|p1Lk1&>W4MSLa=in7?1;*r+95L2bO!vS4yNzw-q2zPM-Y!&-NCd$-DsAb0!?ec6 zw*p;P-HX@&uXoCy$*NO)>pfP>|G5aT zSyk`$@3F>~Ff2!(oKUL?`{2wu-t!xnzd_t7S z&Cf8q2{(v`-d46g_g5w{WL>4&Q{jq;& z-@$%HQ8zyp>o8VJ`2pRAJ-$8|At%VhKeP*znPQWI4#yO0eC2UwxKZ_!R{2LD$=w{e z5NTR{TIP7F3S=!fWwq?oT?@X>>Iv|+m%YpynZ5`)yZR5^bW&7G*8*f;YVcI7VjOfBrF&9COz|Rhi>}HGUgQO3ux)peGOGfOn9@V%(z#sm<$%6B_|KFc&MhE(No!AOo zkLl6cc9e(zF4RAx+$vt5?1$RO3g_TGewe1Q_aqO+@$+c!1*`a>HZ?*YjYIp~sInjG zEJ_DE7%>~wssJC(6#h^%;i?sMxvmJyFU$c8aZkDpz=zV}^It zo*$+~#JG5f)1e<~5#d=UXZZk(qd=QdF%H+!+@MXX*huQ`OrYZ`Tae+bl1E_y{YiaJ zK$0kEK7jBl24A`m;2UUa@rPQ7^e30zBqOi$92%fP)=iDpeyGhU9(RyJ1-{LG9&QLc zZe7x@C=f8!yz2+JCYI5TMj=0FyZU;)!)#k{;ktU$`BS>BaX`ceoo#ClaZX?r%2@qo zKQ=I#GVrgBEyK1Zr7?BEYPZu^AL+EZ0CjYfd9UA2OJEp{fR=!Ij^n#+J9YomMA(XV zsTi?pttU`R-q!uaeJC1owE()75AY|42@Ei$L?JyOPDc)XfV>a3}Q zt6z6XyZ{`2)izY1hVyYVMgYBN+uGLM*yWiI9HMP&f9uG*cd=ci+zC8qg#9SZ3Ho%1 z?iRMmbLqF2cce))N7c>a#M`>|1HQAKDKKogl z{*jg-uOafoG$ROEJ3(6lA?Mb4gA8~m-J;0nOMPBN-G+1_5JZIpg@DCKjS%3Sxe659 za~@~eZbZkpAaey%S9l%8P%_4o02j6q5RRollbdSj99MxYeV%61*$LE>DwaRO?fyvA zJm`Be`hfOhonM3KH2qk#wE7KLjrjSQ&soKf(?oIoWA$Y}PD42u{%Na!tnmvy7zVdC zfFSU+I(;{c^TbgvC}<`Cr9}y1W^=V``*B(pV<=bnJHHx67tMN99a9;24Si3#Yw~>5 zKI^>Xl*)T>l4$`#%LM&aoB45?$nJ|atKVH8%tg6BPLtXA;I=y-{hRRdT2QUeAFxFM z?m**nSpbmex_s@1G8%Y7T#L@)a^BVhD-KNV#*x77Mb0*(R0}k?6`fOs)oic-|um6eWegYRnEou$D1PI-vBxd9)tv%io))eZaKcs;)Uae;Irm!EqdUuQ274|h(oC2klcvm(Fx2k z@XQo*Kzk6C;1PP^l7IqRrehI5B;hK4`{FP}r;I&{RW;av`mHFhrGqM_eS6o*; 
zUqyaJunj?M9J9@UIB^EW9q4aM)!Y^nRkR(g6Xc}ssfQzPH>L^q!}e2+vEOJE?eeFZ zD;_ElyOk@|!IKkQOvO({CA_v<_R}=g$MBt1{nIpw8F9rYnZ#NpbK$eQP6b(TRv)l& zYsBf*B53$u$3Xn2X>i&)t?Q@Spiw%2o5%gxPc_T<>>)lCekK-bC*Y%I=&C%ZgO5oW=9%r;io$8+2E34oZl4|l|6}8+j7)G z<=$=tI?%Qok*yU0@(8r94t=`YHuUup8=<;#E`^NT($C11c77_BdSWsiD;%V@t3kl4 zLkd%D_A}WfC9a#*C8toOKht!$gM#w-9(S(&XW9+VBPY}+kzcG@pT~J0to@nB7|tk3 ztLmciQ8(54ndToaOgt~OD2F%5F>CvoX5yUNTXzW>8vEAsvov>;r))?iSWCnS$p1_$ z3m^FMS~c}EEfbz_3LNFr(I_4!Y(`m_n5TVgHgG^QkvrmC6zoG~aXxa8A2Un~(FA(c zcwBHLKVL32vcbU-*fx){4%(}ku^2$sJGi_=sj2ha?s zY!+pY@jimQuxfvnLe||^VarrEv=&b zop->lXrLGSM#Q>;w|WgluA#uhmBzVm<$s>0m5}FC(a;$evrGp{k_EDb&qQ;)Ue4>#2ags>#?iN)$4YqCfHTV85;nug-6(CO>v7Y-{2L47i6yFUnO8NU*TYgYsvOT35 zBfWTXzc1oFjs(YMR0V#Y^D8oN;CR~Rq6wJjd3|z(*n*Hu_i%r+TZ}H8Ocrv>0@j*C zI&dTEV|cR`sJL*6Rlgq1q<6KA0ML)?jUn3DP&Pp8Tv( zRAB0^Tb&!7j!e|Wn4VvxHTA~hHv5Zr)AX7B5x>MgCpZGk7L;{(|9-F8hDI*ceQF) zf5vKmscmfizsv0w#;0}4Jf5@qU#6wNV}-To6QPH-1zcQ2TGubrv?mq9ob{;C7*1!0 z0+Z3z{|^7B8mkMO4^zKXACWpEOmWtFTDdQyooX}sPVNV11>3hHG{xtX$`Dlei?LeD zT*MyewBn_BQ9tK+f>{m(S}_{78-W;3P}WqOGYQHGnE2A*+}%))Z!)$SZM*Z5CEE&| z-TATm8-#*1o>|fZME3W~L>-BY{aw<2rFp4i&!=O|g>` zp%DS#uhMX5o#(9jS7~~QUr_ri5tg(|>Qo&qO-xdi<4$qls$0~Gj2GJivdbvO-5eqC zVY6eF)J!KgkD&nPE>!Zr5BCj({C)w;drdmL5EI1EY5Lu5Y^W)mQjR4Btl3@du!uGM z;eD=%;D2R{oZn4E&%iwh9=0GP4!f*zQQv5T!a4}gXTS7WyAiD`@)E?l77cbJ70q>( z#Ja>b^x2O?=*{ToD`U2`E6UgH#kqzZH8kuZ*4=2d4+*RNI*mA@MMkjn*BbF-#O(XE zwmN(+TZO*QC7)WqPLRdp0vLA}f&i&}~P2(`JP-(Alet-u&p7YJ`@s9tNdofty5Z62~+mBHNyuT;M&o}>TcY1mC$wxNwzXukhP`SKyK~SA2o>{3b1ox36%mznoMXhXOlvu?T4;sIJ#A z70qFO$Z0pNj1ko2W+DUAHtJ#{Wjktl;`fqeDzl=DqBT=bs1SBFta6koGsMT<&ZAF7^+|PG3&R?#w zUG*Y+^|;w@MLF#V3QK>h3G$egR{mS9>sJ+(wL<0osQSQXEsJslOF(?|Tg{V8snLQ_ zs(1D8{dlkYZ5m~fMiP?L|5lX6!!E*L_`7KS2oKb40-Nkzt*h7u^KU(Y3*R2fZRodY z?O=&(`QN5R=DNgM%2U79orQa(`#t5j;$krU+qA;ISYn@6TsgE|;PEdpqCJ2kU zJVh@quvB*)$0WGRWSp!yIoZ8&&mX(_{{cl9cPR_iw_-!;|Y9m1VS; z_B4+ZlS$l|?WO7RXA@|;S_LzYGgKRu`cJVH?xksh^Fw~miB_+?VHXXw?4?zbOHU1L z0tWz;j;w1>J8}#AplTFc@qTD84L|mpdDV42<&!q0(!B_Hrqv#5S12vOa?AwMdL_46 z0ZT!p=K=(txCU%q8QF2{wifh_90Yx6p@n(_uUobp(ERNRhVe!~n3zN@cuk4nq`I*l zId0Zr8#Ri$XXxr#o4}o6tL+km41|zF1cVTD*wF8^Qm}7y$x1#zAeXkOsE61C-XdB$ zIqB+oGf}Jw(eA7uCUpn*$uXyrC*(MJKF}X_ettnn4Z{g61_W#hY&jZ2(#KI11A}Ka z_Xx;77~_Uh7nek+>_9FBsVH_BXEK}mMp5}MZA*pRnFzW{fw^pDc7=kQx!CWu^P!rE z=;-&_(Re@OI$0iB_(T4hRs22;Itt9}Elfi8`!v6V>G_&f|6Ut2D*94NMq^raS;Z`& zI+f)GjCWssXNMVy7Jb8up`{eRzs^eP`hA>`+;0VExZu_mL%-KF5iix}JsZlzPYRofPPv;VPOD=3tcf|b!;uwapoAEnduu*t9kaBnquwjM zJd)S`A%Si@YIT1IDfB!0s@1EgJgv6;K|>Fp<$-kD0t^f7FRV*o4zu+|sV0I0_CX3j^wm&#Mgu_F{NOl=J;R=V3D+T|Ra5D^}2V)_^I|)C0J_zu3jV z8bgLNHm%9{^iy_8&AGwdOf(@CvddB39|U0>W>pT2T)U!1Tu@?j(O@S}zgN}5aJqcV z2Qao?M*Es-w>2HHg{a|ga@XzFV~p1D1X^#1^WCG{;)B6S3cgD!oesuj0cnD*UlIOa ze!*@$m>llJ9Jx2uuABG3V68=`TZeJ7csmm7U*$wwS3>CBQNl$jy|$rdveUzMH*kF! zg^5jl^R6>)_X0_@{yp^3mTIj2<4M~VBwVU+*zUwi!Yvr8t9Zn@FBn)s@ZO>C7FSc* zi%xj^?z_O=b<0U2g$*uwS6Xer;~A#(T_U?9{O1O=E@v}!u zRecY>Q?SZ+C8A7%58r<>)yZClKB`m@+iB*qIsmLrTaD^uc;H4w5Fgf&)!bzV-zAbl zcML|kPQae@&{DIKRR0gnJAt%2T~2tQVsq_%)Uv}5bk zchfjazDwWd-6*$P(0_1cn*Y`lz)lowAo>G~icw#*iU(P}hL61K)}v0S?`$3UD8K$+ z8JI#Ok{^~S0UhO?)3LQIk1K77^|)P3Sn5n9Er3m!@{ zwiq4shijd3OHuDwFT}7MlY;Dz8{3N7LIipMlBxf5Uq2@{L~ec<56j_4?`yWI_Vz5b%T8HLlN*u5BlI5^+3Ej5>+GqD{dAvZsv zB9*7foZVOR2TyHkMQe}pxy0Uj`d+*G# zZpz=CC=ss#Gwj0f%CKpwhq~UD7vi zY_PxYP6uq`7*d>*C}^ZrbvcH5g3v_RGZnk^e?bvfB9WZ2ys?I-MN!0!Y8d<+-S z(66cZ$Z&4;zu7{R!Q+En*Eb9;0w48pazYj#ORApZd2=}+wVt;X;gq=Ux9o;8Fz>LN z(bc4anfy;gGw=*{TO%@wW@St{yq}9G>`tU%6L|3Dm=Y4hk`3D3$mXt-!Z2;dJcP;T zJ+*%trhO~g_wze#z}u?o_uS;WY$rz3O)MttzH(t#T?6Z~?FP1ZJ+M7}v*EvJ_K^6! 
zF>I{d0;=SpL?ABgEqy2v5$?PvtV}<@+S+O54{0@`PR&zndcocIkZA81y|xNfgUK1N z%7?T*oM%G!5DDW$Y3c0$yj2DCKO$H}wV)HH)@tbywyG=>PJutCv;GauynXi z)hU6F19WppiR0;Mwm#s#%nE4=0KRP@@egEPD*BagRZ+})PorB>mdJo^g>9q}|J5W&{l-2n0OBzN^q4L9~~_oAUjxsuun*qGdKYHdHjA}Ocs zsCOPb&)L4b-B;VI-{Bo#SMbL!TtINWr;MzTr+h0(RE0+xD=E>Uo*3}}qovBoA(JSJ zGKd1pOA_U+ex0LypMK6dFhP#0?yhCRDpjbL!_~495ukg)wJK#~?rOcdB*+^>1z!L) z0?|J*ocH&a5-m!uNUkl;zH-tIMhzAhE0r5pq$x9qoq!z@KBMSY)R%}x`f`b-Yl@N> z>?eJN79r(cQYWl6aCZ?ZHkV34X+%e{&gh9-*mgzEn>fk+XX}oxw8QN5l%$o9I6~Gu zZ`pv#f@U&d7h>liiIKpBLlsdGYFROcbg!T+`1Q#oo19YC8|X}46u9t*G#yQ!;sSjs z;5AR7Rl^v}#Bf*tV0zvzM<%<}`9xy^lX1>)%3g_F-KPn66uHM!%!sSXManC&=6oR9 zT(E0u-;FqIp(L*DDMT$h-gVXEm5WFVhu+K`t5d5`+OT%yw&c2U#K2>zp= z8c^}F-H4LI2rX}_b>1v(O#s;&P`qQuyB%n=?I0ywj}%6+$VTo2c1gUu;yRkKtZ@0J z1A-~J8*2rR{pS$)ZAP<=FrxSL4LbNf6S6Ng+P2!5)bq!VM!b+b65S6x|0kTIK41Lb zx@}Kj2Hnol-lLW2j%R7SF)PviOJCo>}gNrBNgYJ!v-SRl+FShQak)`hp5c_*M?S+*c~Ql1K7tm1AclB**)B!GJ-gaWbfUpSADqQ;j$KyE zy=O3O5(xO4KeH>E;@5G)tpcYB*L^iWAy?rnD|Q%_QzrGA&}KK1WABCNhMQ`Qa5!I& z$>>OWyNdzIMX$D`cJZCZ+lIcg0y@DfG57<+b|at_onzgMIsX?YJX;gA{G=-v-VO+( z9`QgKGHbmP2>W|AB)AbXlythW0kpenZuJ@x_kb|~z2A%P9PR|RV)#;U0A450+jbyE z9Af-uVwEZ&7m)7XkHXBj?`15sU1g5?txWZbY=_yyBBWN7n@S!|gfyJcEPYt)q4S-3 zg}1VYMUCP^tXx%b$9cx~sj90dB2s)f(G1ris|@fhoQ9dc`-Wkss{(p@rqV5%V0qaC2u-TW1>J=@p zSG7h9=p~_TVQmlVFduL!mUTwTjVK_nhb2=~Tm{b8L3Z-)hqV_VQQ)$|%Yxv=0c-;g z7ipHL1*;3t?2ra%aNJ!Lj0llVbXM{LN`;)MjGThu|VE2P(xQfJcj{$yDX-iuww}?`PheM)K*;+EslKyLet- z7cbhiVy~Ro(#>xnM(XgS@4eS!s4*^F420JIThf4CdN>ZgwCnKVupAh6u*I$j2m`Rp z7J?A0^+C-`FXpoVq-8|>l&H#$hwSy%AJ6XBQo1iv7^VD?QRU< zsov2fR@#hW7}483wWMxrDKU(L&Hg>tlqTPr?35-m9gP-sdsCEPZdHK#UUM9p9TMXB_L_a{aviPh9%;y;mpP7knX9 zyjN?h7~7HCS^VO+m0_HOb3J>n*0bN|&s&vJjp?dk)dGfK4Cc8eAiw0o^x8Kv9t%+BR&TObk<-VgyO>gmtf7EZQJ5iB z_h+cC$T`gJ*R2W{sL-QNbZl%upWpvF-XJuLoIA4KA5mo#4f-t~fZXk2LEk8_@T~V< zZD7qQREJ#@G!}$S>+`jSR=X4(hb3FF8P)D{X5?})!}XD7i;1i3;8y~!@3`nA&c*2S zeu>cd>H}mDb*PyGe}bqZ@EUdNLjBxIB1l?r_cJsYy`#kZAtp(CZsm|E#B9?^dMYST@b(i2CJ;d&)-Qk=6 zyKTJp*85#p%W7g%r8#ZbcVi*@Ma~~;HdQ10%nNoe8tOyzXj^K(`)S*bh1!n+D53zC z=1%<5JmnFJTtsNS7O1o({nk!_?7iR`v|TX(=`3`ac(=;3KG3kYo+4_YK{ODM zjwHtzLuvz`d=oYjXmoI6$j7(){3!&yWX)_-%E5u^n|4vi;wsRl1MDdrA&uFk(gz%b zn-0&EipL1+X`lYf8i+4`(Pjfdv5efn1oY-#BjpGz+7QHD)z9O(A)AjL54eGg3+=82 zPM479FQ{Dq8P2H~`E$J!=wb$fiZ8sR6jq7ul6g7$alY49gbjxjdOQr@4Z(2V3=?)U z0K(~FJP^p8W!2t}40mU0>yd$Bu-%ECL$JyB+PN8_7-#HaL1bkNc6#`Rf=DU>fMQM%#;l#!3g1EvV~}L|s`k zlveUcA`_eemOhf`47CdmwK63#*p~ln<&TKcT%VXTWz6V<#vXG_s{ z6aiRC%Q2?JnzR+=wE3k4S1^G(5$JKrYd4jN=7x)!)&k!Vrr0<4+p(6~ig9_@qm{&T z*&QJT0U^rS2*A86__Dbh2xIJvFi{DQyp^+iO1Nl_HbF{PB${tNL=L|50 z4UeW3#{nJskG4;>SmK!CJ*o-BtyKSM4%W&?6QQ*Bp6P=V%LH?Zy{bwX8Lmjs45(HH zazm>TG@OW|Cj6-O>Gi8g``}>Iaj&4znNsO)iO^mUo`3$cR{y9r0-|l(OdHXKsA0sjKY25ls;L79<*EZRpI0IhabP@(vP2O;#Qxv65OlS<~rXsba2^yz@r z=h2rgsbq&7wah#!N=a`eqmwQpxg1Ellib;8CqIPyW*75biSnG&MUUchfhn)`r93sR=)25K5 z>&3HRGf+Y12G?7#dja`%cj~+qBlM0|k8MX2E`~m7JHiRh1^N>tSIqT*0k*47jh&vf zJpmmg`WhkB9@8Q92GsXhqP{ejrH=_YM#?=hCRVuYu|!K5UX@t><&Pzz#0mhx*uKXS z31etj5%BicSfxI(fU-=Wvy;3LRO#ogW35*G*sXFHSb~24FMn3^nCOnU_Z~o{R!Ka# z@{Ypmw32d|fCxnJkmT7RHFLptmHKG%71mZ$VBW@!9mW1Ykqx_5o#1rhd~MV3=pI|= zW7-9QG=S~D%?Qn8AbMFDXOfox ztSX%2LU1Lz(g1DD#W%iP?W*u&RQ>IKKER^pJ!aQb3_%iZl~nBQukfJpExZ0$aISu9 z(Gk3;0!V$`RBI{VK-2iM5Eg@+z$*gT=%RreQKLOY;<_2B6aH9eYmsWhX$8+@T4s$$ zY(3y6xx<|(G=_Z}>K-u*V}pY(p1vEIIG99WwHZUr?w;Ii-;2Vo-3Co=39MVXjoP+q z4iBbxkR4$>J_+{s1x4y&tb*OZ2D9Uw?Fq@(j+vEd87HU66IN1|=quy-)=HIi9?xUW z&6brVVsd*kZfIp%d`LD}mJ_xw>bb-83Kc}{5N4z@;Ja-SqmxBm(tN8bOBDIy3s$Y4 z5#j->rYw>B$m%t#4X|g}jSs3x7M$+1bpfiQf9RYYiUhdktiDW}Kz_`1%bEhlxv^8$ 
zBG4I6?XCJbF`aGte)})q{)%-5!fe~1V_nfvS71XfXWapzvDbQ33(a}RDw;&b%gHyS z)TAtNMjjx}FhVK$z?Gw}V1;PzH94G}Dl1-vISE}$?_ybznPa3mpr}7B=(sTOTiB%l zkS-_0tqMero$vVLR=X?|2R)SftiCzNdhJT|3ais}aLp+X)y$k-4HR%t0>v=b^Xjtu z=D!xGC;yTQWx-cJFfqCwjSqM{R9jSv$5P)Qmjq>Uyw8@^T6PrmsFi?t%;QSmP?=$3 zKHLlp(?J3fYwG6VIR9>|WdG_nZ9Sm1!x-Hu)&fEIdb%O#{Uc+jCfrq>Y~X@z3bpjC z`7OH_5c0!Qwxt@$!-;LjP{2vM0}Hh20|Ww^`!Virw(D}mm?!*OL156xgzc#W$GyMw znmw+=i0f6sN+Q7>{JU2A_$`-_WTz=r78!)1>a%iX!2T6%-{XlYR=o06lM7QrW8L#uk@4inMz7Z8sAf^|kW+nmeqQU;o1 z&{t%jwT`I%aY?gyKK%#3b|%sLvJI$=+XirgX2=_;Qe194&hBh9*`Dt7R|5HV zmq^UTI3aa#D!Qs9>VeLU+kCVLnG-9MbMBgkJl#pr(6fcdMO|EV{M`r$*Ht3x;3nyJ z?Zp6*)RF#~Sz6K$a1clZeYaf>d^+7{?}|{x35L5+yAg=@~%E{;bePi9m;@Ru=nXOLf)B}vkc9hxAnw3WRt(H(X}4yH;) zFMR*2R;|wrF0w`q@g#!YNBlfCWC!1u$nP_sv$|-mgMcP>NTpE`tFd~01g0qM`jVMbIoPvtyQ*>?B^f*6XtFieXV;X0U=61z z3(-u!-!`rXG<<#dYsjKHhgfGRM&O$`PKXM9+)!*q3Ah5*^$is|>Ee#~eRToeV@<#J zQ{;9u;Gqs6-}S(%iT&nIj329?*8?&)1S7@ZwYxC@&Ma)|o1KoQuUH98#+_Uwgkuhp62sCca<9fr5gw z{H5>Lq_gpT*8?k9+56K%K7G>4->*{$nbg_ywl7k>XWjQtMS#T6VAv|(FH%MKF?oN_ z-k)e}l9&lq?@we2mx~iAK}f1UFoY_U5vQwGh0x1JJ23zg-55zn5ist4oh>>PnS;0- zw)*#L55Ujr6nr!5rAep{pR*Q0OixIys??9E{TXWuh@9vnt})RW5Iw4dbw$GM-mF{L z`c_8EJy9>|urq@LYQQBcHl***?XQ2{Mq+dk+#pxBysB}3T1ImHZ7V2ESOuj192iro zI_d_|7vFy?+0GQb52UA#>1d||+%E;}(|$!qr9%Vg$I#^%aAd!;#SVQBnfsNf?Kk1M z82!<7ue=(wi7Pa?@cBqVTTH>V05Lwj zyU{=Ho3ct9QcTFtJ@ak;KAHlRho{IRsF_!);IHWfq5M$T!T*rAIWFju!sa-`|#0mH|;xm{9gxNK$oW&+jI7wmFC?O^Mi z4gB0Vw_sP4)8YQ2=2Y7S%ZTd(@fMWFY+eZv89@C|3C=Ly+ZF`PADz~}XV;a2u|qOv zi%Ple3ksV{(JuS}%G}EVo-=3IRsucX+YR-Ih+sIa8?~F7V>gk6tzb`C3xxdweOo|Y zLfLl6)?+CAn!{SS6V1Ic$-A9xs1``B*xgukCMwQ%&zKecp)re~bCC2xlD&o}#&pA;MUX|_O zkhi`$W0(Xxr=aMZ)>c#$3D%Lv|(L z7LtqKIn{T?-4v((YLsl^AeoQuF*p5oEizoDw;;&b+gI#*{M^CzwWy!Zu71;&1hxa6 zZO8|$!~n&UD1=w+8R8&##Sf$nw|&fRMj^KUQ??ePk0u!AtniC5h@^cFU;n_Z_aPPV z2WR*#_v7#;?zip;0t_*uxgm7kz@5(BVxPI{aq9}4+hH8;#W!o;wuH$m(4Bro2M1PB zzm5;ueSIDHqU{E9DVR8Rb$fw%CkiBH9~8B{cC_J$m3%OflRG+`Aj0kkQ(s$n5&NL1 z31Lt&_bC6MsEM&IjNuy3_I*&N3h&w#DvPS;GF!=;R;l!Bye1w#e-zpL2aA{CV)%}# zfaV7&M+qVhp@B$;X||eZTrcQ6Mystc^xY1AFj4L5*V(@6^fQ(Ob}0G;brB~v677C` zH3>3LPj2PYqUzZzsGPJ$D^LXQgoLDzYU^KzJML6w(MOkcMd4!5t!{y3X7L+nJpr~A zo?}2c=^l^km#Ejk^SG-RjKr`N&RSlTTm)qWeGj{$?Ue9nPHurN22#9vowexzhG&fQ zqIF44W&Z5ObMyywZcn&`p7M#_n*#}bV2C)FH^AF51`~PtOjbapt7<#wi~Gpu9rETP&(6>muQ zVARGTuzNhMD*`)@+y|88j`s!c7?5%wA0@?lt7BNwOIoNWSo zl7Omt8tv2w5ZgR&UC|NY1 zx90%N&=hP*br3&dtf>rVJ_l??2}~t6BjoxB)xIIfLpj#^UHzQ36BqPB=h-OlMdHjb7pAS~KJK7=2)85I zJD#-IccLA}j1A$wQttd1MsinR(p!k_MZH(#fXMa*vxpcL8dQo{|Dpi}M?(&LE8SdS-YF`|bMc-M~0qqsk z6l?Td7;=?GLC*5M(Cxu};!TH<{W5)Zk+=R;_R2$%fFZ*XkJ?RcHZh4-mRoh=Dm}_U z*L~Kia*6-=wzUa7?mAM~ht5E0g2C+4cai~g2LxCetTff9)2KTPM0F&89&mmriecys zfvFe|qm~z_6w9Wd?^C(c1)JKZ&HnJP%g3=<&)LNQG3qhEr(-7ay{q3uc(1HU%;0b@ z5STi{etBQoy8KDQpMGps=`8u;$8*A!D7*TfxU>mlS21y^>($5_9zwxNtvU4~sJIqg zOKdg)wA7my;mNK?9d~%Ss5;QIubd@e=%Pd%KqP-PZ zGj_q&^vPjow^e4MaNdIotw-2 zHr!KHRH|@(7(HpjDtmih2-93F<-rwgCu(x%wE%8XXtCrlI2Li~wm~i$F z`>@E%FDa-nemIfP+y9X@Tl!(0ekT!o!&;SnI8o7=bnzIsm2TxkXZhx`}>JtX*v>2fzABg$;%EqlcB zs&&N8TnbTZ$meq^S}tI+fnrBw9nPV?GaU#c;z%*jFR5tS(7$IhfyV?c1TVicV&7&D zNsndfLn9M)C7M9rj?Hf_nz-QC^Q%fAx8>xTkNN#m%!ggeLle@59==L7Cb!+!qkf~i zh*{L|aLW9J>mMSlT~fAeI+lf_TQS>;GMa&o9+gl6SGR4s8L59DxZkv`eOOyEZrP-u zc3V(*{mR>V;MhChx)63JkQs(qw2jE4mS5@b3jf0$wkZHChzYwFgMkV@{ohjk5gdkd zO;ciGD93E>6d`lK+I;~?0RncF0e=PSvRA|v)z6cDo3inT6 zZY9504-6jYaJ@Qtnmq;ek0;VzG1_M>0TWgC)@X`D-rDr@=WtchSwu1Cgxd8)nsk|8 zI(c_TpItG8KK7_I&$+I(GN4AYkx2|mh5~0vP#x!y$bQZB@A7IMb)nN%h|VDOvnhSM zNWm`Z+h@b3^_`*iW|xHSFrvg6{d@+mPS$ARN2h{K_C?8Le>Q0jBjpsJK*u)^fg;o!d*E|*qy-L-Aru+1b-J~ 
zcfYGFRI7+>t)W1cEKQ|*g3$->hc1;+8a{d2o$+l8C7z9KCxE*R!hO}~C2SndE>(u9 zu)RR-a1Mc(eIyZC_6p6Hd_;8RY7aQU{751>g8#5Nm3>5ud4Ol*2mN~s*yAF!9zN=4wFnvZDL@<6xAUrB8gb`CLT z2Lt7o*{ACC-O=3MX@@={TK1C*nXlCglV03>HAP=|4@463k>VCG-cPib*2rs02bmJ- zao;)xoC6xev0^26D1^F)%0X^yc0I~tIVZE$ffy^?5RVXKfUR{XFdp=4nXp7ID>JIR zuzaJt+X{gRy+jrgi4J#la4}kk<#8*9>FBA)fAcZFq_U{MK>jnu(YuxuEZ(%sG4P5K zI~&<(@F<4)6;)=_Vdc+#M7M0&--3MNA)`5rFOr7Ne55Z-j<2q;Yg|)@IX6aw`&&?x zuE~sM)^&BU4<~|)8VMpEPS{e6%F)q5TmDGe?apyci+*#O{h8fRk#W~cxmlb6f}hjP zH37Z(xA2i_jsARl{hw?-u=7!L?C+=`JGyNIY;IOOq6s#-MVP;+`PbX+o{IR(BwE2y z9yJ9nW<@x8+fmizX6t^8r)({u>0Om;Kw)Vw>i4=)xm6^B>1S?BDiUd7q=Ja5R0%zU zl&MhG{beh!NR&xrc}}p3v}947r&nWEQIQDs?HLB?D^{t3+5Y|eEfX-N!Y!axj3wj7 zKh+g!h>vtwjiBMUaoqQlgwgP1|sS5E$ zx!JY^X6|X|yVe>dlez$|JR&*f0q1x+Q(1gfxr6tvinPO|OExdbaMImsp63qhi7pPG z=&*qbk$EF`S+6)0L)O`DBT?)X)<-_RU*)hZL>09A(wR3^>;d7O+50a>(baDtXcSVY zVY^D}rKsq#U*A$@qOG<;yByf#yp6>UzZs-qnzOpd0jY6pAUzp{Ci zI?dJTT7|Z|33P&n{JnHR<$4E*a(F$U{T-Sf0d3sxCplXRth=kbZ8^S^=fuF7Glds2 zqT314R%nRXx+!>Te>-ez0Z#^Ec015uc5pn)-nt$W+v?%j?g$FY&$+9O=sfOfVZ!f5 z>us(#rAiz+h_RHj9 zRB)KHqVl5;)>qQr$yAQ5mQ__H;OOb+t@@*hpuY6H)u{IBcZePK(L|{%B{rXNJNVH= zscGMS%j#5{w>A_#0@4|LIn?VLSvC)3`u5i|Yl)^Ab==}8BiYon2}k4vJlr!Moq_Xs z1Pbg@+2gPJwJSQ#ZO_#l32+)6cl8?^h>l+vwjotQyL{Y60w8jav=;KJlU$QoL4C{$ z=Rgb%kEnMHKR*{E3rkfGA3Ck>vr#z*a7mf{@Y=qZI+&K%m>o z8oVE=)$h2FV^^JiqsR7&rAGbj#XctH&*h`&(|=6V(&54M+zO@iV~NCgM`3KrlyJ6} zo7Tq?y>YYPoQi5#srv;Vla=^E)9)C_+Q}nbY@n$Wm zoqZu^tty27U;@2FA%$h>EMj&BNpx2<;NgEr|QdxPW)wU48bRL9C%Nu6hU) zW*^r{^}0X)lzd!Ed>NaSkEaR5+BrEhDf@V$DG#t|0QsD1`ND79+j=*HxL z{85c{2Woj_?mfcUA92v~4dghx2#0P%g8s&zblZrE`Su0#7X#pmhE`C!NhRr9HuZ7s zY%WYs$}tzCt;0ONOb3cczCrO)G}hr}AyAbHPQqB(<*0Ou{b2Uvaqpa*K+rK@R{}`G zNFUeBxj+Tq%6JlpPUD6iy&S~X|77#ZcMTuTEP;g)KysnJF%5P-5MdWZH^>&F0gTPL zeJw=;$$j*)vXTkelC4D6&=_|1Hv&i_Ck?t%H;Y&f_`}ayAST7$a)8sEGKh=wyAHNzGG{tZ>~{j{|YHak!@u=Figg3J0I6R zPmAs|iARw8FM3Uvo%X;?PzRI)!Tbrz+G7;eFHJrp&sz6h&&$0lDa_3pAio{{R6jiGN8c4h> zM@DN7ylN{&8C(;%5gx46{n|;G?IejStuBhl@9CjRZ3TMVv3m6@ZS@}MvnkMh^%ZLo zy2p=Ot6I-^K(#h~qh~UQ%_~|A;Y#sR^q4sHOv#L@ zaWvWG;_MIx+#O-qtnyahJY!b`l55Qcn+x!6Cwet{bH-7akI^F(OcK1S9JID%3u^SJ zZzRxEK+D10EC#erKc7@8EF6~u(V<>f8(vX4q@(EhMh+YjM0hhGpyFU_fdYXtMtMiQ z9bH19Mt6esN-+S8AOaj&!`+EKz$1>>M${l|h;;4l3VH|n1-7YnlPddP+C9}{dvXc9 zvQEf2b6b_#@uy@v`d*)90o~U(vU5}u165)=x!&SlBs=`zA%xo}(!8ZtnDq2$C7(!S zk^SI5{1+?zM53uC)_B<`L~B?|<4{|_yCvp673YeE%;FP?KAjDobRM!YK(*o%Is1f` zd>Sz>S)b6+z-<;J)3H+>C0@<@25&W=5K$47fK#nDn#YX;(!qe+#&%v88OgoRA!T@g zRIC4lHUaiimk6M;@!$t%kT{E4RCV?1F1v1xE}@~;rk2x1_D$=I#(0;sE)^OZ!n0Jg z%R)m}BT(m2he(t*pepQ&s7MLe>fiS|A&#gPYSV1Bc_n(&8B+K}+Lv;tY)Xw}Jzuno z0t83OxfSEVvlL)2Me~QAeBlL~i3EmU-?q!2(9%QX<{qBXZDdw(hCQh6mFSN@@6H9@ zh|%41^3{ODdWXuQz24+*_nMFbf-OV?gd-nEG@0%a*M+0VXl=ep347{*AzZWg_4q&MWI~X13p%VkZZPoZ}kL^S;F4@jB-4EC= z_qz1~bGs{u`w79g7a(#|L)c*0Cv|KwT!@I^2|t-g0TWup&IIE7WLoIXEg;$`Q)V{d zl0!rqpG?$lt=)TwH3^#4uX>2S1qzK1JQOI{iMu^dHjiL?_`2Rj&kgJq&J>n&~^wcZc8(AC#p6V*`CQ5F?XwZ!M!X6o{kc z%T_-kM4H+7@eU9u9vd#;;#bgKcU^(0m=jdnwKo> z8!;>l*o57T=3I4bElNHxF}PhE4cX|d3xTt%*X&N@@Xj?sk2WHCW(>vw@>0RmFa(={ zOM42Oa4*W8%niA{_tq!1N4e{2_mJWG5!=xjcZGz^)^;M@0eQJR;C_HO&BF~|es&{C zNxa@_78UHeao?zxwy9@YfFm)S3@ z_ETwvIj1u2KC$hfatWQuF<)G&I-D$MH z@XW->{_&(+8(tRJF+|fgD=;XU*%kf#_TR9x%;_7!FHgy;43+WZWAiZ$NE%PtwdhZ> znTCIiaph*>yETT*f_?!eZ!wB?CLKas3NUD*@S3b*V=zH00b&9!!$W)6jp8yG<2J-r zbyL{*#e}=znxMKR5}kah z`YAgUEw^|$tolH)k)6f~uPJKr8pY+PMYXd0fVB$rq%-Di`nlszF))k%e z7~b7cual>)Xf3Ha4LrcySN4!f(GMF@jW<4(1C4s*Hxz9u|&k+sNN%eA!FybkHN}LshBdvBNGLUt+hlaW6Q_Juv zyIQQ1Jl5O1+R7z|Dt0aAi0zs4kF~iF<0ymY>qwXK+^2BdGc9SQ6w2W&^<`Nq?BKYoVk 
z#^70r&g98{FYsm0g5J64*ppi3T=!&xR`R4s37xGzD}6H298!S4w6Z64CLJAh*Rkcw zVJToa7C9pxEx;-Q!217SAS)x0cM)2q#IdBsSfxaIudwPT6RopFx@PpU)jTO$#Br*7 zL8w(W?`1yE$UoGeuOd`8>z+(RHpXFlD0(AjhnzC?LNjZGXKQ*gogIg{X}hL8WY<#c z#Q3DOMw57Zbzw&hk0;q5j;t5yV(XGS2jdUyV{Y$tSvmPbf69A{0Fv~7L zb^0C7p6Vk398osrsrkUM7ek}MlZmj~+HESDWUv$QT#T+I0i?%Toem%bWd?~$f&Ro9 zZt6oeqfAbKWsD>*D}}2K@7iosbbmL<8C9_%g#L3%HYM9t6n~>EaY)g%n2$zIOcs0% zTodB#Xi(mh;)0Sffi~>x>H)Xwf}1kgV)OvXtzxS!MV)~ZE(QWVZ_8?7EHr0V)B>k4 z4`%v?eTD=d42IunAddXunniha32%A+6t^R(3f0YetWEu$M6IS% zpUADOEAaNzXQ#F?#v_=!AMlVBL-hg-xrl?-WSVFRAr*=UQge$^$n@mt)?Jp0L?KAgPHvl$v0!X;n9d(=P!KsRNHw6+X}cXDdN<&24MA@ z#0=gaOj<6fviIBl;@Vo~abZ^=uoM3W+l%kN;!d`6=0Ox2WRg$4vo4A@LkFXs*TrJgJ*A`bpK#IO%jQsIvpL~o-Rc8< zWQ%!g3QRn5XG@fGC!-uytxw%bPBrlVILYgHf6gV^ZBfQ`A-UFeMt?_Az=5tvVgJRF z>J}QVHQ@owtVg-+((W2iZX*wLeJRs_1my&mFXSdF&BeAvxcLWH55Q0RxX=*c;Ec79tejOPB- z*5mi}zhfcX(eJq9whcj>a3RWFeP_KRJPJg4wAN6|*C4*sVOvq;7*c`lSfe0S+(&n! z783apyB{NRj*|)2Gg2of*_mxGQh1_*uB=MO&g;V?$>~i=mDT}{{?JC@j4G{4Yh?c$ zHC9%Y2-aN)VBc9Kw`d-VfsM{>!F@a57xj}`pvyojR6jd<#47bU%1d%qmB{jFgH=@} zvc%MdGp{OW*G*9gWgA)UL#j6LF`q zSg+}3r}gA~uk6Kc3xU-;eGN`^8RRUTOYok#~? zP?xRp=|oV^zF}E31OfM2m5LBW5bAnb2b;^}v4{0js^)1C+zG|fsEyL&Xj>kPEZ24f z<2u3r%opvD;Ip$Z5cNXz-&`G<0tOxitmSEKE>H-|7p=$WBwGutMVml&oU_iSwdr_# zIt-@%#k!)R2P<-GFA~>AzsN%~!aYtXd*J3XHblRyt|D|dhiSlq=tcJXxC5G-T z?qP*uE~n&>O+`nc7c?U6qLTiY?fQGu(Veq7E_Z^%r1|w1CNt1(3NFGfYc%jH;bJFW z&a+YdH2momeUJODG^cL_%RI&j?@1Wi`GCl49#*?toUaL+=Hy5ZyG}KL#>rnKXOzo! zJ+N=qrE)O<99vz%#|~%XQgLV%IO5y392gCP-HIx^#KiZf8?ov*oH%~{9(Yrzz43gv zt*IV#$Ke(*PQ#0?2LkJV!=ANhca%k(rjxdz*})$flW;fcv~VHVjBhvfzo%~wGPFrz zVqQ<;hV6h1BgW=nJ4#NXOya2B7jSqaR8n>WpvSh_(>IDFoNmu(eV;*QM1M=3Nff#M zb^gs;=`*6pqr9_P*)zJ>p<`7ZpT|0EpNh!aQ$+yd#AD%DWmLzmbI3A5#zRd~EPf^t zCQlJ#Rvopt_u`(zYE&@8e-I2st(wZAt#L5G4e(}abx|ts{!HwUz-7buYpH)GkrbkP z-r@Z@AoBY8ZYlQGO#u*E3P`d*e9PLD0$Cr4PE}jI#vQLKK)`2PcQi~CTSlTMM&dNK z(<+2vH!t1#?)BI7k*LtwaJaSE@=E#M!eLoZm)hMn)uy6<7%CB+LcMUySLbqV`kCT> z=*I9a0lyT*aVCLsaU^hjh4?ZOulR*#Hk!wLh)P$Kb#BOy-Z^2}QXpdBRb{j#9(g_h z9LAu>u04~so)d6VVqg{`x4{?f^%xotnz|Sn$y9ZzI8Kl$EMp8jk9Ga16|6jy_Ts7o zb|dPX9D%vL`CxkdHg0Rm^Vf$_yB!GxdFbk^RkjffIb#Ys+H520a}csO*j<$(s5XlF z>P1Fmkx{O8<91J(>kI3%wxz*#Arw!sZ`t-UItTp5=nwQPm7M^MPMZ5fy06}yMC+b? zdRMUAL#6EroeK=MJ)0IeSq)2PEK5~nrC&CNFYMj zYM)I@3)fmc`M&lK1_s#8vJ=+{x78Kk9uk~9H-viiNLBDq5S_)8#9CA;Lk+K0KOY_& zJ8f+N_T}Dwj?Ye^m?2wsXTH`IBQ)${TX;tmW`vd@l%6Pz?)11H_5*>_KVow|6ctmf zIC#iNy3mz7bUp^@C=aYwP_-;~K4(+P=uJL!F2rBq@cIx3SD*kJ!Q-|721)NJmHZkmnN?sI5W0l_af)zxVv z329WLul}=EtJ1_XcGlrwG=Dgm|MUqsdr)I>xP; zeqD-go|t0eLbW82hjth3vdR^P@!&Ws?D^fF39hJED@JQ5ZUt88dAMye++qO1>JCS! zLZ47Xb}fESDr5@+4)H-%`sTF60Z0)!ln0hn!K2x5NfV~5$>h{`MdkV?;VN##D095u zd@w{}_-!rnaP4?IKEMArwyxS7$%y*(jk53s+lbbWBH6jC*0G^s^=vBR?XTix@?MNt zJFfQFthWMe_2+CmR*5UCCWcTTR{!3OdhSOxl)c@%$F4AiRf7pDtbW7xVtBZkp$iOe((0?O`uA3$Ph@yj zc_2|}pAN$8K%#@pT{QTEd6jZ~+xKHDayfL_z=o{mK%!^-aA3!&Zyp0-4F$dnMy5_b zXPvx79E$3L{)h(f`p6sP%x)6Sg9sR-V=XKncfMtqxj<=71i24hg#gH-xmS-c80aJp_Y(6l7ta)*Rk1WJ+ zo^j9cQ!0zR^t*`3E(R{imllE#!Q_p=E=3LkJNNQ9BN*+?4V}$)IRH-N&i2}D6g4GZ_qM2UezwP! 
zRI#s@fa=Q!itH>8L+SdyqWXxZU^@84=hJp7xeWv zai4KVb$Z94s2H^WIBx00X%pFEVh0OZLMCFdO3@lgiXdRl&L`9 zQPnsmFv@NXYOr2gJ0)!k{P8K$~Y}am(Q;FvE74n6eAOKs}}m2`0Os?z>dY8 zXdnRkW7^Pj+MS?|ClqWee0_0fiFqakOS zj?SNP++y;k|D8JGHH?6O29nh9Q}>w zw7a_#1hz6)kh&SoG$Iq7u(c@N>84`0g>_S#tw$lZD25$;PBeTR-D=q(Z7AQhSU9Ec zM$cW_XtUT~ml)j>z&_5d_!-+m{CY&9Z`a=;k8IcwQV*?`hlje41jD!~Rm_XuG?v&N4HSRxl z@Od44lS#!N@w{43;=S+MAr-8C_S04$?Qq#e|4OB@mtYF;QKeFN2C&uuH;4w0W7nou zhfg9g?^Llt43@i8%-?TafNtF?*5?M0J?aB&lm{TNf#+|1n5W(>6yK5`K)r37Wq{*;RdKIFqvZKou7JD|Ri$ z-}S1NZ9!R+qlo$Jy2dYSJzZJar6cT@p$FeZEBd2N|V76~_<^l0y@1X|3j<;}oz^(%HS zpu<(MQEtWJK%Rj`G1^!$2#u%{?L;0oSR$+OFs9MVlJf}XFLwhJma0tKUUam;OTN`4 zn&9S@gcuw|B{hloP(&mSMU4muT`AXUE2~LV$8DP~mMej02e8?vN*OU0twI^u`u~mo ze`Sr1N^IOv|HuNitCLmLq$xh;f#D{`@R3(tBQkC~W;H5zegJigTH(M(?ov=Y7|@%2 zRn)0mV+%ob57ng2;WN)#y?#Xr7#2~*VGkIF7Bz`yMKmq&&1bD@^4TvPur?uz5{Y{# z>x@>Co3SpHa;v;5DsL{p`JtEdhvYwe3WLku!y(};uMQAoUygrw9 z0AjKSn!PaJbJ0ffJB?OxF$;!mKC;}*oke*~S;zcsYeAjESt*2kT}_>4#1@N9{g-rj zFWXYU8)ZXZj_(xPC59FUtGqP}bNC5Yk-HiFAbbJggw~Wo%<9h>w*#d{76QVzbwOPH z4jcC!VZklSHezr_(Yrn=j^wU#vo<`z#V2O1_i99m>+^1=0>|N3m8}58L~jfJ1ys{) zC$Mj8LajJZWOy9UG;zWBvS1=A~OsHQ-BqZmqR-t03VjgAd}Y&8KFa^UjyLZUaMI(9ICa=*8_0Mm?k)6I=8- z$QIYCLMWUhrR_E)5qhBMfO?gH|`SUJgy z#z;UOo=9hXUMS%5&I*yk^RfF8n~L%K7W4d9c2OV((b2H!zyRi8z%E5C-XeSb*(;JL zw%}6HE=Mw{t|E~al+5+juV{|5{$ZSR!R^fn6TG?ERTaaZJ?;qJSqdD%Z`rlzncJ$t zA}W;~`>{8!qi2{g(R#hi(hLe90Xw9|hA$APHlX*Pcam$0lv@@dlUFMEzSe-w^bCk!6e_;+xE@7k)Xl?? zAn1sG=0;DA{eNV=Pf*=QmgjpGT4+SZJYz7FuZWJ}1BDsygNoEPlV6nI})?FHh#l^Vho6cl+flIqQyUaP>}aF&sZ4 z*++`1h$aRlSoDXNH>;7WR4Hbw>r)$#W`3Q85t~x8oU^3U>KNUmQ=ICJj@a#6#MG5S4&Sbr}jegFrYTlVlZ-6c{`Hop9R)EwUadwUeqLEPR0 z{S3SN$M!f7VbZ^HhaWo(hz&Q~oZv}(6XIa_4*N8)#JU=@r^Gir6I^Uo?785kvr5G?RAj{nM?=*_j)25l62eA*TvV3AeO|g<8`g;NsP&kzn%t?4qsPLoOoRX>h=OD zM#V`rC0qr2nw?Vh46ITl`YZtBT~oqdINo}0kHRff1}JByhdnM-Rs6vCfBr4vA*+5} z#II1~?nzb?5PA^}U$@#o^t-qj*TvAa!+QbT`551)Mh700VSs0(S$O6BbR( zKMWJxqrUM3Q8(}DafHMZ_w`8VOIwKXvtfE<^hK3iX>Ya%>IMD)N?%eh*x-7k-sM1m z(BQ+iqN1_!ttFVg)hL3Ag5(6vW!>wlg&f?$ZD?k5el2l%9-cB9_{44Yi|v@o9we8p zTGAx?H2NMKibD%v50Mqx-C&n)`zIBKI-QLTgX-FL0BS{oJ-sFC_}@~MOD5~qB% zI>?VEFEv$UXOwY%tza}XHd|e_Xo&-oyxLYT5CbHIvL=BbguM$%q5xZ9YpoWE>;B_X z-&~;B<{#hkfky*zvpVCsou$FzZo^R&zRbr^QX>yVF>Xx0 zt(M}|4K$(u@c=u5YpYEKCYO>FcUryo6}Jc|qGuFx12!8?NXyt`b1~r@xJ&|k)qMQW zNTyQXjqIM9qBq*G`_Vxk;oTv{7+wR05{prS!h^l#K>$fP(0xByiat{)w&iHo#PPJE z=`0HX*hN;O^mN6KY)#*a`1`gV#sNtzn3U6&$S5+KvWy+N14fSSZ+1G^-H#zlWLV=L47Jp*rkTq zvom$)4kH7u`m7MptO2#ZRR%8Y;nGkQZJS0hsK<_2uSxy`fz*62oeP()SgpEiBk)n( z_tJ)diz}LHJ@a``Q-4o82kL#7E0a@DZN7N5%UV^BIMpQW3=}Ylb|Wpze=luD?fnR~ zi0B>Iwzv;cmzrhmsQJ3%xsCI@5S7lkc~MdQY76(P^pN@;M*~z+BjOknKVOZoe1m$X zG*DDEYXGVo&S8Y=`WkkQ;jt`O}1F=U~*@+RK!FGxq z1iR|*!YHb{y{O8W3&+Y;Pf))85?3{K=htll4#yx$WA4R&5c71cNSsuaAnN`xaf%KD zVS=$;wkOeVu7A{b*QbFpw)Z%{q7Z!+m5Hi-!=6WFK7{9VdLgj#S2z24w z*ICp=964zf@scZIyjdx5S<_{!ipSI6wd(Kd9ExCLPG)L^jt9QB_WOyvJXnv_sTG23 ztJlw2(?qW|sXm6&to9ZmgM$pRWo!L@qSylV!}>jA^I)Om1!-%8vrUk^wRL?zZB4Ak z6s^j-zn@lQqjQ#?Hw4u*)D{R6ZUVNp1Z5Wknae2BhGN9NyICm;8d1QH$1@QjY$}j( zt`xJ7n2fEzw3%q_vP7FzEp$?T*Pc_YYh2&vRg2x4^?xtm;tFW@_3VOWThR0QOJ^_J zq97s8_mkj(nmZFrehR(79l*pc2VQQNb=Ot`89aWFO9~vUF5(kClZX%TNg#jevTf++ zj1U^OskU5qKxI4D3n4oSl~L(ZXosxbh%&Jo~EBzzF#7S_{tteI~ZzT3j2Yb zbJ!%N&>PEU2+s8&R&^2{4fEw6t1UUOO1_013LkPGLrPC#U>#c6Wtn8TLk6 z%w*!j_vH;SoxPwmU?-zm(ZiaaQuFLK!bm-SN}Qw$bs{k{=Uk-#z{=zJXH^2tJ$k^Z zuMavR84|rZWI_IDgmmM%s4VrC06)-GM-!0T+Y-pWQ`KMfEw3$eJqWa4vn~qL~$Q?E#46?o7#5I35z8V?L zl1J{GU|{nMzVk6B=WdN68o#GE$me%lA?~Z9oAE(sQS`fHyS7;F5bK@?0iRWmq!FX5 zD7%f_%W{Ap?qQ#_6(RiIC$_3OsCl@=1;TzuLLJ3buKY26Xd9aP6s#PI+>EMY>H5!l 
z-+n_5S9|{@g&$r0_HPrnx%0+1$K7Z3Prv<}qz_;_;PV5{|J}p* ziszl#*;l=ZVJvhW37b1$dJv=OjO8q8WG?yNoSmiJz68;a^p^d)Ya(x zYbbDQJ|0zJ9$bE7Cu+2a5p1|A&B+>(=sB)Hr)ttVYCdP#%U`i1(etgXWsbCf%q62> zRW-WQp@XZg(IJaC@S%c!V;v0yW>9UTPI-^Hc^UY^OE78d04a>d|GpnDY&D0ju#K#@-h zYdRML&U@IE`SSM)ADeqBbSF2mdIlISRxU|hP=9zqq1`So2~v>y3GhH=II8#BQgi?> zg_}&kKuPCr{deH`W+@ycm(o7c^n*fdju zW|ul`N8d>ChHWQm@BPFXUfb2$7$uk8o*oINZx3t4^rld#+C29Ii08JmM`a+KqR{Vw z1`4vM=K@)ekLVGw4z&zlIc-m((GMq*WbA2t0Mdf7ojenGBy-Tx^Z1AjZe5~kaXqp( zMRkOwNo?+sH$_Th&*h3?N8_~@og}C3u{SjpADPO4LHCR!%+ku#gwn~Ab80_-2h-vDxaGY(?UF#E)h^d#1x-%L`H7-nXM98U*P+1YEMZ7u z23=q?ULGN5e4blQY-vRQTh>VL4g=z>J(^U=k> z-v83>#q$mJ@B8X5#T;RwFQ|y@2zP8TFyS(i_)QhD1UUose90fL$K=}Q!AHQoAT_M|#t?N0L_bj)) zH+7Jt<8^@aaL#2vRKFfB$^|U@K{*YNzm=BYB(Z+fFs(OfI8MH$^RWcKIBKWj4T&&Ya~42l&O=bGw?x(K zQGUOxRPk+sm?Z<6s<$*2ts-=4^H%+qPCs_&el*8q`V?LSTZFT!Rc}c_10SjZVwc9H zE(Vycg2{2aNu>i*tgx2Ave@q_M_b=Y8^m?yB^MxWb34yRrBOI0LjDM#r|RYO@O+wG zQQp!&!ey;HX4eg(3o1bY+yX4Bb$0l z$Dl7<6s1f7c9b!L7tw5^SD(>rBui|YRY&BiaBKd#7>Zje&ByaNZs+&(%sxxlgJ|LV z$Iop+z_Kl<$C)P9 z4&h10G37)*9s}XPpRxnhyhSQ>d#q>7Hc=QvtHl1g0_2H?HAWnkr%}dU~@^+#aw*)`}a`f%A`YKPqZpYphRVtR4 zGoQ!b*77IFlR`(Gcw1yjBosIR7A1!&thEvgrJN;$qBzK3S0!gLbh{5W)i7TT~ ziaS~*h%gN#)!|BC9bKa3K`g;WRTH%_>B?Dcyr94(Q0v=iTR<9EuR&ymNo&$$4??q+ zw?*!a~ErvNj1ioa5Gae&5>x*D2Iyfg! z#Oav6@gmY2n~B2fBxhxAXXDkAui!T~7j-d6qwc)k2-N7(f_oamE3dp__oKTGx3XG@ z5AWxPn-w6j|9~1u!>iS@Y{GdDb=pzABFOeoWp*rT<7br)!sE4x z!kpE+V<&1wk8Z7th~X2xB72beQg{laQz-Jfe?o4)}+ce@LO!r z0&sKY&m!3X&aa)RS zoO9K*iwBqED{(hL0!aE#0fKN{7qc2*olcNv29jfUnZ@<^f`~FmY;PN?Nu)Qw9Byig z&|&*Ni;f|!wr#c78r})eW0M20DZ6@O**RCS7aa}yU>@poWyB5K*Y7tyY?(c()qyhF z>(X_T>60#qpMTVaawWXARWsv4XUA2N!Q-MgIq?IH zqupItk|ov2s5{b+UDyv2*_}swo&7+^AzRk4RYV=4u3KfmYU(V z&^RtRrvPAA2FSH3KWl68?XZ)@eR4hC+PR^T?OJb8k2A-cddtBN0dYGfDvj{BF3+?b;glAt z?FNE)KjTO615r@>@7@mw!)i2s?(G~BA zPVuY9y~pn>m1^v#rB$geRwq~!a#Hj9 zz!3K>w(lmj9z(~Y{oNVh3WTxe#M2=0 z)!=UIHlp1E*_pS3K!#po)rvwDHVwCechcU#WO7HuV@Hh!`)oYMExFm0=D8VY(`wz& z+R~xgz>F@l1{7!#uo?-WoyxK zAG_7o16$XS&*Lr()*JCbD@ZcB87SOJ`A4x6Q<+Eix1E@H#BT%?VRPBlJiJnAdjX<^ zrvIU4FuO>+t@v%;MP)3O-|&i8~~GrpMXEl<8HiXu&eFGZJO27nZ|p6Ryi-9+lRVY=M9 zLT{N%DKO8B-UqjS&Bd8)ZNd!s6N&{rbDX0me^;9talc@q+!7TsRC(5e1yEfK4><%* z2l0YwDKRDE1#jnZYl#=q$KiGmh?3s1cht@KbMK~Ii12Kd4=h=E7*TgdO(vY^vCL*y zj2d<#OxwEyatjw&E2y=rn{Z1mstB7zcE+KYK75!U5-X|8OP8_A)Z;L;KlN_fB0l}X zrd8d8Fc#TuW;A5vQJj45O26gi({A!dGU2o!=2V9hoh$+B8g)K*;CtnncS(fZkJ-yH z+J*8bgJX1-%N7I6YcAH}0Msipk64!ouL6X8$WNkW;hl9?+Di1mHIr7WF(qyzXKMj5 zSgG}xA?Lrpv<*`iw;ZkKycN-*0A#BYz17cu(g;=dOwshPOITR-oy zhtYe-)_)nbeYLr{wSO{S44~e=?*6CO!j|rmMQvs)8h&T#} zTm&b1x>u;{4EN|^t5gB{01^j8fyc77Y89YH{+-nbzwGoMT5X;7h!HL%n7{4Wb0TTEOD@=D4hacTc}>D>2F*zZtua7%8Y!nnTR8rpN3i4hiuI zS~$u!)Z?AIwiyVX@sNAl@q${hU^{_}#{p;Q?Fv$fJNurhP4;-+hlkNa^ABxbEnH0G zfk+;y!xYi&K=Z<`*EiUUw)}Ab$7T?EwL?{fQNd;Sew;GuY}ozy;K3-Q(WfW6V+iK8Qyg7CE`yPZs%@W^sX4kZuTrlFnpZz zIU&*HVp+J$=&+yIpPhP6t^r-qIj1bCeq%sbXBVRW*!G`TQ9s|r^l>OqLSLfLfu;Af zbGf`648{dc8|qX{Ly|Vww2B7Y8^mT_HY4mOk+-6^+gm@ixp;15i<}P#mt4hb_f(Q3 z=l4~@I&~58g5LbP<6%tR(_Z9UsGHUQ*dAyE1-84T0C@)?7M`jC>6R9@60@14GknF> zK;$CSv!-X?{cJrTB>z#{P^+*EKq;<~!TgLD*^#%^5UnsGOW~D8h9kP%jV=)a@WDwA z5H%F3g`L(O#xKd>H?he3f=G0q0ee(lqbc!fx=Q}S4uoIC!F;TtU%u3Ahx!8C=gEWl zM86YP)Sl|`79ZkSEQjRg@w_|~NrGOeY#2vHOvK*T+T! 
[GIT binary patch data (base85-encoded binary file contents) omitted]
z$8q>W_uf8^UX$QJ+J>gi_(A>EE$diGDx4p^Sl9y~Wc5&_7l3ezSB3nT*dq{+nd$9< zR|}Ss`ryJ>g~5oXn1}D8SI?A?K~v!Wi(j>oVKN}a?*d8rC903AvCv|dzFLs(_SqNW zGId+2&p6GOzbgE}TX6)s^v^NJLqdTsUlsMyiY!D%@@S@1Rq$}rFh57E+)>-b zjyq$Gmo7yQv9<==W98;yP+Qxql)a@e?Rc#BWC*r985;~ccJ`@j+$eB}Z5v&+t}@v2 z3{P*5K*Gu@ou+zKa}teDpZjReOah~^xz;f4*a(FlgPw6)NhvVo?W`l65k?X*;_4tJ zh;|;2QCG(*fCD|QsiKBM9Vy6D7zL;JYQg)r!zjL5sI9~g1Taa%h8^yr6@%vCxck*J z1?v`>EX0X46+6{gMgu^WDxfl3Lt)nM@suX<#{#5#H3AB7;$Igj?tT7?mHpJY-}73H zYz@&~?Dift!d*R3n-YfUgYlp`Z%<1;_Us|=$Jr+_rF>X5Q^J7<$0M&A3H~3f8T)x z?c}s?NEKEJV}tVi9`Y(6ae~v-2a%(A^p6^7ve-f}CLP6U&*=D7whzZK&p#u89QP*3 zUR8sHi5*?KO%_rEy|EL zhYGp)x!bm7?4i-T^k4WdV2o~Z53>(n>K68HiLV4lTDO-&`nBPLhAme0-_+njBHk0< zs&tei0Fwca>M6B#Vk-H)z$1Zmr)RDz~V$R9(E=^9v6({vBl@;L>&BT=&7g0^x6klbKTJoK{T9s=? zzwpD?xR_ZF#h6HPYu;ZZqh65$12Jd&>#`eLpdWUir@=P8OIz3${0@!dEJ&-WbD z>k6jehLe6{B<@U=OD6kRpjteK#o=h7`%d`c#*-p4Afqv~f#SbOr4tYmY7I1Sa_PQ(a^Y)_uIZX}8l;};5%CvDNmLN4(DlPyH=(qhkmoOkTs5<`s*O$Jq7 zsy#!874-(KIn}QXt_G(&&MGvj4_?ni3r<od=lqDwRxZVbiup^ZE76HQ@$ej1eu~ z*)}{zpbDea0Kw*vOry5~L;o=c*#~OdLKIM^LDq%s9F<2j<6ar?!I`x&`(e5oOjg6Hi5>$kx#>v$Df4S z-=SJ-ZjZY?8^Q+-#ZFb(oCYwv)NSEzUx|D4g^6vZI| z)}}xTkNM;SEN>({_T#F#ETgW&k-C2_IT_LEW3;0-l-%==f=MTM{4;Wypi{bY?h~!|E+> zEqx`QeN;pJ_E&Rb#vRvZ@`qz?cSi}rf857;S(0!N(wZd}g?LY2gPnb3d5`x6R9$;hobz6s{pZ4=e&E$#1`+Y0ifYk* zKhn^B1Q!{NkG)16NyvSoiE)PER|YbYxaRO&8F%mt zuMTE}HbeYUu#6G;G`=#JUDbhH)e zq^*lyvp_iglj353;hKg1{}Of5&%+*a2lzw5$RAj9hbjW5ORfW zn&TRU`hBtdKiBmNb%YZ=78%fpSH4ydGuN_aWw0}@QZMWA#=82o^UOGSpm>>G^O~sz zQZprf_4M$K?2L*(GMRPE^=xt>(SGVH4J$w**J*CW$aDT{RzS|~OC2z?y+J^tGfQL4 z@mSy?Q|gBe!a|RNw1CtrUg}M7L^r)=ZVRCxmS7as3DNB@L4_6S~O!OHPxKU!^daSJ& zWTBj9tU-;1{D-Fjf@(ZtujZ%9ss)O2o$LbjO&Nf0)O!{~8@`l)^vC{Em zIHojK5t$`xek68y(;5(Mp^SaE->65w|rX5Q_Fy>o-a}x5BPsiI{ zx;C6FhU2Wi>LB}wFt5Jm-F4UB$9pcpcox#@eXR%_F7fHBaZZ~_Xn)c3AE@E$yI{jV zbVGC)P%Iz4R?A@z0!Wun!vEy^nsj)4tY*KOyZArHC!W-nVf5#pdIQEXOh3*ms#jTt z!^r1e)&|TRCua9yjqAcew3lCcY@FXfdSAI04srVBfBiRWA=&jeuD%kK4U+X&2ll1| z@tvl~9fS{K-M)7zMrNqnf6!NPog!k9VTU2;i=TXUQ^S^SK^TaPMVu14ZAQ6!!Ks2x zu&_%X(3Bouc*+9f;i)9kFFGZB%-ixvT&$M7M4fTTDd9TwxcL7QSDsATDy}2cPh57Y zplTFpj2oA`Q*3S0`gx8jlE`%|u299qbUgT1o+__kOLJVMzheT1HX*K72P1vhtg?L9 zc<}T^PJtvR`r1QeLb+nsCMD{hNDADCg|MY#0JJpzMuBhK>2W$V?V%CY5@^K=s5D^$(%WwOZ=YTO2_( z>NP^cFe}~QZV2r%u@yd5MQP?4P5KR1A^w7XW2ZI{&2G}kyg2|ZwFB@!h|kw5fGZ6g zs9e%E7dEF7sY8V+;wsC##+7jzNu&2ztFwmox|^^~`F5@IUs_Y`QgHWrbqI5gQf-5l zV_kJg5sVM8B_BGOwo1pYC@ZcTM&JML*b>^_RU_)0d)1)7!dp24dtqD6WDhWm2t|~tI&%nG>#n_ z0oCwm+^yf}CW-l18;$5sP`>F>er)}xPYA%kAr>#q(zz@h${BoQ`dMqX2> zYeL|sMc8f)v%M4L$OFD5>F=Eprjz7*P{3o&#&$m>IA-^a45v-@!-~NL3VBi<@i^c$1aLMf4tgv=8Hs64r0w=Q#i{sv$+l$2=(qGr(861m~?hob*u`}1=l!q&h zs&QJg%)N;-ok~Q&*^UdmP-b)6?oeX|6u~Uu6Q-ZSyiql2pP=Y zyIzU5&U|#@Jp+gVE+PJ0cKS~k9*|_)=LIE{1D$`Mky29=ANn1XYY+&QTl{njdQ4}*6c)+E4TG%3@X4Sx8F|ymiPkd{zc)yWAf9KV!>zR!2)vOp$#}BI3 zzoiJRf7Dk-&xED?$rH`z3GO3ig$gG}5jb6-k6unSL5pU^rxxJ}ukJ;&dXgaYc-HXa zOkrL#h%JOsFHmQcT!xiW+cc;a$@b@t@3nKNs7Z@#kp zHxpO5vtRvcNnEMUNE6DH6Ge2@Y(XpC^mMgqE=yhTHJ;Cw2~3!;tqp;eVJa$~5ENz! 
zD>7H#CGlAvfq5=ZwN$2ky3Vb~u-+Mn`Ll(DNHSp)6Td;@Bwj)CWa@RYgrzfee?n zd5ik~c&oa;ph_ehnr1i*<8T-*LoDpsRv^D;>`q`uYepc}uJEv_iF4H_ot z3jUOs*NyR-?DX}vX*iW=K^FO+{@G@oj~ZM}Y>N;_K*l3&YbMGILA8|(CTv6Ajs%=UlpNA8?5J=>M)eQ=STx7O!o7`;X9wSH6n<7tXeuZG zT#w5_os3;}ro*o-JCz**baee|?6feWWHH`I0w;Q3uru)%MG41YBvyIqv`ID^e}_$l zkvpduc)3e*7Y&a((*XPYrW;ujS@;LL5Tg#CYDsp+iyAdmEgw^VBJH6(mvt)r*76(g zir!+RY^3svIxa?2V%OpoML@A7*mX_a>v6-0vOzZjVYfJbCa6swmdsalGrD@TG1tPa zcx6f2Jn%;rTU8Y4#xo2%D&fQatJ(PKg45p_1MU0 zQtp;k71;y&O$H@I}UQ3$&c~XchBy0Ob zb>U{mh;L7gBqCacS9(sZye7${fK>UMO^&}(`EI*S38*MUJ+5YIhPi7RaErz!J3WBI z*rd}nLt{ZQ-SuduaIbCc;A4C`05^E@&RGJ51~e%=__Ah?1oPqgdFYPK2}GB=FH(TT zpy2$s&DAKb_duGTH6dwzZ$$^_6BS9s4Z9zee zkFPrbLZk6`=@H_^jO-F?p9YY{ro#rbh}=Lf*R9h4i&5tdsjbISdkzp=uPs3RpB&B5 z%*IqIBMaTekI9xk-2-?`cz`Xv#cIvUh3v*DoOpoQnlaujx zw|Y~%f-075*Pd9yTpb2M+Z$-+hVpAsitG!#T)$>73$GdoQY)^N*hpiVDLa&Vr@H3w zK=c^-dkk3zW8l{0?9^+A;yc69-sAa`;^uI)fCLrXN9ENxLyIYi^jaX*k&52EE~IK< zMu~BWr}VLQ*ir2{Oj9QpxgAqmRD{U6$2Iro+~2&%PH3vljdv59>Dc23LOgz?rhB-{Du#+|^1 zUl7)8A$L%hT@=hZ_re<+2_>=HariRQ>2eK_T~@0yS6E%qM4fBuva7np7XR5ieC_^9 z6bj_d)rfw!32(L=g0;|7V}B;xD`EDBe`7aCLe}DTaZbhk7XHC*#rn7yX`c%;p1`Qs zZb$F@@V^hg$=Uuwz#ztWE!&rRgf}KPoqhG^nWvJ=U&jKrwc8zGgdK|HLvh;D#lWq^ z_5Mxvx}PiHUSLR(IEXF|KBzy8?R6FLrT=CRoeknhgsF=~59>V%7Vs410FTQ$kmMoV zXyes^eQv|0`H{0hS2#&rfh5sA8-~YJPn?W0efC%&l&h{>TxyS>4f2eP>WV}6_MdQe z)JZ=Sml#p?=0b6T!zJ@%yzOX%;7vT6h&5Ajcf_XzigGqs%kb3evYc>R)Y?~0rOQd# z$oPxdWKD#E$N6%(U$9%oOXAu`*O|+#oF$Tq2inVBn*ABnYKoPLg7`GB86+%5liVfPz=p0qL;DS~O56+MK?-DtqBf)t?ujCBMYW)RH#>H6HzCN%4 zZ|HGsE7~1*g77kTr>#kAhav)bRVxAUaU*|-gl%epQhm@mgks9IgSuCbVHUIQ?A&>I zGu%JnZF@5Kcy?M}AVWY6Zb@32dgAi@Yg!i#n_!)&9vV|k#sh?j23wyY;e5pgEd*+y z(7A5ZVYs#FHQS`$HN(ekv(WPlh(O&EuRQ;)ZH+y2E*MwqZQ4=j2;m*u@586mn}=Y( z6dea)a%@LzAyoowr@(t`8Oys%ICiojN6Fw;xmKIy)HO)vEtuhXTQJ&$e|~u>B~bG2d>l>T6!p z)d_J9=f~FswvVdI_PVxh>|b4tic0$*>`3+}PZfUp(jLBuqi5v<;=awBllz@VC>{$W zD*xJ!$0U_q{Ev1bW|8Y#XD2hnJHU|ie?rj{cCB>@=(OJR4Y@Mst(XosPm`U|(xIwN zhJz6u@xGSfUk@Mfkj&BeOY-f(=kC%k?cCY0Q^rM}Tuq#^rmW(2UI0sR#BlG13xTG` z-&_=4o`?xg5GsiVB8#hP>+G_&h_ZQzEms2LGOTBw7VT=Rr@Mo9*tO`j$o*%pYi6W2 zcK3rDTK9Z&GX=Q=@vzW2Ew`KT`s}9RH`dy%0F$eQmV6#a;$msHg`}rn!rKocYP7rb z-w{c^jCMDN3o=zmJ7oSZIjK&_6^|BoFK_HX3$Pj}UVQc^s8SgS;Y|Ql!Kl z98I(?SB|^WLwe<P6AGhDpM1)#fDey8HV(!^r!NWWf4|!aK3ifCW72BOy z^v9wbVGQ>8Xd;wFHC2nN7TJV&B5jZ-o_j*Wy3Uw*u_ptL2NUqThWSoZgBH9|A&F0^ z0kjSV!5QM*XpsC$1UbJfCI#j{C&ET;JvD%zk_MYH8f3@KYAnggBn75%^%J--u$*#cDV#lc@&CcaS5LaMZx1Z1D=4pMl`SC4bEYqlsBz1R$oYUsfDz$<^RuWvRx<*RQd% zScTFaE01nDConF5B0epNmoLn<3T;Lz{ccs!bjW2pmlxRB>VQe2e@eXw*XleGZxGga zOtBG&J_LY<)P$t_(mBO`MNLgV*t9M()h)u$7)VvO-7%x6x>tY_HJ0W?IP)Gb_==FNz zfw>!ENQ#@#Y@^06@_dM!v<@YF9Ge9^-`|9F2d;06TH=`bceYh;sd2)QU|F|mxZ1=! zZx2LMIz&j+OR?vJt+qqWaC99aX}z969u9q%K9DL;SqjI4TVJp3&eq2d?4D@vR|?C! 
zSJ)Pzd)gOodLtU<M z+o5dEzk_u;tU3EP1#P7;jL@(4fGK+|8`Q76-$Q=>h_wE?{^&Mwa^XhCks|>WcXYni zqk@{Kmdk<1;;nBe)wwkUA;^68C$i7U^>}c@n z5n~im6+TN^L^8kHh>n-1l!Gdd>aiTvJ4fi;{cnuaTg3AZ+WA;fd|`sOyr8L|!d{mw z!xwexy5a4|;`i_?XMbgv<2NpkLv|%5ko(azyBdG4E~+K2Mk`Mxk2brmhbBHFyAcm9 z$-(tAjetv{-3)X%fW3|QUE8f_SXNX~yU;$*cE6{q$aZq^pF00Qwh*`Ww3cr*#y_q38; z9k&??x6!^;uSB0u+joH+x2B(=?_-6?s%Jk4+}KXPbT9pqg+FTcb`T-XB=uX#arE}6 zOOYk+V)EeIq10Uq7tmu5y&V$i()#3?%oaWzf8p>YH$BMoxVI%8xu;i(`sY&{uOWIH z8obI9kM&&uOt+Rz<|h1h_?iGIj8xiJ@IxT>#PKt znJ3k>C6!E_C~OHhh5*9J*i+GX_}4g7TJG(*$eEa1$CIDbB#jNP*W)3vliv;*JNb5a z#3s?MF^1PfW8tU1omh>9#Wp=)=hpPIP&2Yc@IV-JuZo_cNU>fPL)8YXR8MQD<=ty#F?C#{Fb}cpovl&cK~g&7PkcTfM>y9p3Q;WGArUY5?ZUR)Zf<-wZ1B* zoGV#w&uf8^Nyz(Q+`mKcDmXcV)#BCo_HEMxH6F=tA7F_4vzOG#_0phWI|8LH(&gZdu`^o<{OnN(xVQeUY+Qsy zoMUkr*d5T8@ulw3gs7~*!t9Odr&`xP{!81Z)d1r>-`UF=nQsaUE6QVioBgoSNT47z zXa@pom*5Gp5pc9yD60--sJX=E)`H*J;p|TlAMI9^oloxgp|rvY8zh2(m0g3QlMgK6#HMdNXNFPR2FyU+3DX zm_=<}Q4P6xGbCL^c0=t(dn*Gr=*Rbr)}_KTub{{s(atr(8QG{_ayh{vLSSR29Fc3lS;KFlc;!#d1(+F7Bsb zBa^umGi`OnkbSOcSCVMS0k}DL;DFlM9_Um}bdU$Qr zJJG9xPvNd0Lk#g?iUmzn%Wvp=(GF)R`!?FSv4(=B8ma`vE*W{VO?3M|^+M2~{17PS zsLEmRJr6@obJfH@JB@lk&v4K6+?n>^xuB_wx(dn$`p~%$k*aIqO4wsl0%R_aEA`YE%i|yNHeEm?Jdg5gF1+Vio5S7s9n3gx4(#Ra%Ea z<68W|h)-3Al>ECUVE^c+)23OS8p7NR`Nx<=_*V-&w?12;3pwW{hk(1aYG@tEa9b0sW zv9|gg$R2SnKJ)*WJ_mX$9BTV}S*wDc)_^cMmIVJ<;trgI^ys;9od>)(TAzmCj`jLz zf3yL0UsmNgT-NEyqlB>F>L~>_+j>2DLeys45SzSau-!Il6<}VXwu)_vmy4Iy+U9d& zZJqT@4iOBxMYAS-G1b7&1;6N*AQ!raVQ-5KPW~`k_#5ZIw#V>X__VQ?Vn-qKkX)gH zo^hu>lM2-6yW-FJPAF`Q=j;|VRu3t4!d^|PtG#FBSGG@McmTs`dpVw5B}zp7er;qK z&k(i)T1bv7=2+Ar2V=O&aOa1FoPW57h0#FQkiD8syRo_1Uen0+7?JU8QZo{P+mZX9 zQ}RDQ8n3wG@o0)c8XDW}cwAgX%j`seMQOwVI4M9yEX*m5ml9B))=7wLW%w^?6mB?A zk8wuxaXSb07cvhE@<>}zrinR zw!N+EP~4B{P+&Vtrn6-Hx*T&O%ZOdkB+AkGT-7kxHeAgF{kawm$qQlEqao3uU3Nng zL?UW*tITIHK55?Jc5Z66_4!`Uk#{Q&Sgz6a0g0SLFx8|Jk8n7J+roG$1=qXn3t@a; zk?2C;moYjKjs(&~bC$0c7oD#Ki>xi4N90cS)uEH+_@1!4nqY0?&+VI-&Lq+m--|yJ z3X6EECX?5%vhTE%gv+$s_c2mK6AA&`f1~$uFt&FRg{p^rb=&^~?<69ZtH<>>-ZI$+4L$r$P@PJ6VB%AwVQwu9CO7%`cY>(9c`0@ST(0-ax~*x#}%*7rl=Q=o}SfWsu01y=yx_he*~rWLj-tzUB+1{)T)K$<5BBssiESW0U{*|X_ziCSE}N|tThr2y4T=?; z^G>ibToWnW?wROVyU6Co*sekFisSR%37&=wq)2O>9N_ser^Qu^>uf>%<|;xWjpZuu z>+7zGlp$6cugA!AWM%QY6t2R`<9DtXs<|Y7;~?5)6|o4N`N8@|tBQlNtZt!It35U| ztBE#A4{vo^s60V}$R5k1T>~!ewnDF{2<-KVRtkjEuT?r+rBt14wdeIj5e?|p>R6bi zWQah|OKmlwH#G7KYY>bo*z%@8!<}}?QZEpH1L0_~wrB>s3{lj9fzKg8lFfYswO&DR zC_ZZ4T3@d@)Xw>((o4KeMST18cK8$?J_Q02Y(V?^3gRLDzMACH$b~YbT`g)N@fOVX z`dC8l7TchfiUQggV5lhJ~jY+E3T0h=84 z@tY5f6!q^U_Q0=b2$p-<`HE!gnP-N z8fSU;Mn^szC$szFrSm!-?DldT_a7gnc zJ72pU)&n+b{FMUejGx+T@oQ15y&esQU-eL*BY|-zO39Rr?5Ot5)z^ddo~z-Qmbg5n zC^#Z!|Q?Bsc4S8OrGb{npaa#%BTNUEF4S(GNvFA z_~@ik4ao)OCHO{l@h)t$bHe=<_nbW+0CLOGBVBkW`F$tu*1dl{tkJEi7lkj?0#M&Z zhiVslF9%TMOuM4t5ER^ydo`AX9CI8hbPm__+Ep=pGS}lZ!AtJ3)n_*Xb?Ba7y`O13 z3V>7ZC7QC7AspP&#?^UcXQ#ZMM_+fg!N7StP{P;PzQ|U9PaApuOF=Eo<3ILQK+R#% zASjV64qArA41Y)4*R=*;ox}y))!TLk_(pUAUKHk9P@^64Ll5c2gag;xFfe6fWr+{=cx5p4Y|AvG%X% zp?G#-nExHMhtDVS$Ejr=+l=xHbY3EN8tmq znK7*wX4=y+F4pE|n#Xa!Rx!@1`ZEtC_DD`Mv9 zFPrh6*y1_^n}1$n1Er-te9foO}D4Qy9Owqk!4y9;XXY7jnpG>Y8;x`VGC`(%&s zd5#GpTxP38#yxu2KzwAdoSY=$6W#4*ohC@eX1`H}VhZlbXzK;6)~n#y1|f#;+{S2B zXk==eva{@2Qd}{$IqOAMdE9{YL_(zeTHAX6K(ut(HuY>m9zavuWZN}4zc!J2IL0q& zLCFJaJF>|*2MVS;16ID>E4uCyEcd}n$&_|$9>_tqN9zdC%rd?=(843dzvzA0LcbD< zKTgWaT9$lbC~SWWHuBE}j^=^rT!aT3T+2aqhOZ`T7#sYi9nwVj@Oy{a^Jh2P;Q+U6 z@nUX$jHZHKPgW0fW3LH&6v^%eiF1-x4V?%!h9faJQa;;oRIl3LUwZAB7PN{)c6K~4 z89v@b0^yX5(@w-aF#iQRsUdsYNnO=or(!jm8=E|=g`7zOL?($LdrSE6nY2=$vd7M7 zH3_7(k!TsImqz0S_dEW;@w>kdcf<3-ve54r@P!zxm86=9s$bM##eRQ|otHu)xF#|x 
zbD3Ng24p#>3%h+KfG>5iZVDQIYlL07rR_xKP8Ky|_4xc^1s5|O+QzZ8Vt z{*;%`vHwaFMvFISUk65Oa9+5hzc0d?XLsZ8QzuWgZ(_v$!dkcZ-4llRv%3uRZOktB zET`c+q2}r-w7Btx&+w%E5Wl@FirKq~ME!XF&+LJB6P3&rC@5wRz8eH~ac?iI+(Yq{ z^zA3?VKq#ugK_athp^sj;{#f*mV6CrgeeU9^W8-4;2VgeiQg~2IC-W${%#_%b6>Ct z?nd^#Q|tTPU>B;%5#l*)-Bdp* z#_ab<`^f>B^rkG>lz`<0Mo2HjAEw3(RwCCL|KQ`M2B!m|<+DkSxY# zVoz&~9zOh8?}`r~d;(@>c1*K`SQ?Gz9vwK;K}!XqjGzsht8SJ3<7K`SDOr+=VJV%q)9 zd8-J(H497XtSSIwHHS^9j)z4xOP5t!O}21EvLRwx+9rJ`r z&=yE%U{l-gQ43f>V|3!Q?fC$cBI#GhpXdF>Ppm$+*ex|IZ$msq+_p8noA~n_yzqax z-zeoBptySfU~O8VN>4!6k*&o)W9K_+1sxw$VXyXDckESjz4g2sTnp*)8~u9Xbo2#A zxrOyMpk7|GG;izFc2T3#D|pm3Hl#mc7lFM`epKt)G8+Lz5e2EC}_61Jzv#S z%iV{tV6Vk^>nM~a$ixR1?1=WNrmJ)KfE`tzX6yp+gvYWk8;?Uc)`<5w9&Jb@F#PZE zb|Wc**6&ReZ3wAF0;Riy|Z#%Cgn>5`nXfEisae2NNJCY|OfQ<=BS!Pez<>*z3PU=du z>-3PntJ*o%6@iL`4e^}3WxP1+^;pERYP+Ffy9qRnpEIZbmE9Dc<-{D>t!TXZMf*JN zor?S*T=(JvDo?*OZb!eLkf!kqot}l|0R+FNxN*E_8a|1-=!P_Pv^t&x#!Q!mY8; zVs;@BtC3$d4A}!0f()Vxb)tA7U9B~oPxq&ZsoFyq65T`z32qNxNH;%fn3NuQ+=US5 z+)aT)OVDXFZmmE2KRWD@ctG6dj6HfGl!}>_jl2vOL=(9sJs{-qKxQHCkx=dl>Rdzx zWqTrqb(L*nCxYh%U0ukvjOx71#0zp)PgTWS#&WBYJtZiWUA^s=6Sion{OxI*B(Q{4 zV%xOIfl#i1N^ay=-8Mz9OFat{!e}g9ZYe1nrt7yWyJu(-J!wLh&5RMrLBgb;*6_V# z^s!m$(~et2V5u^fON^+ZjFo0#?KI5iV#bg%_cY(Re~@kK!)ZR-TC5}zQ? z0x)zT@q^D*+xnP77qrDTXpqWO@@b=<>fP99n{+tVuCvX00do!LEdpJfA~x%7>xEzo zJ$E}w+@4lUKii`7!lD}6uFhTxo-#4fnVU34AleQATZ6Q`*6E}sQkpqf@5&a|eJOVf zmg{24(4gaoRl)YE8CqkA$dBL=K2$da;k$&E4$1)VQx~kfa z>%(io?BmHvxZ0tLfdX4e@wE7z?_tDK@w=;=->0)H1rK3gOnXa!2?6UvUvNg#sNl{R z(IB4RzZV~{bZ?Eul&4Pn-|U>ajvYwf{rTv+#6O%1*#>!%BqnlEO&22Xa_f%8=EsGN z2aN|(Uk<>nDC)1McM2ilxuo3*_-Hd*x+v;wcVlq$H{2!P2v}vF9H(|Knz~LQDNw$RrVu?& z<9FGm>v;t+)j(T!+xM{s^HMcH8(E0BHz!9Ez^J7i5+r z0gh?n!5 zdq&n3zo{Xcsg}e4jaN@=?4o6LHY<7+DH-GJXt$92c+lp&7xXfPU2JrVD9ike{+Ky& zz+s-Yxms;vPgyWz&usPmD`oxWYxqU1uPqQ}EpQvyIOEApdx-j_dgmIo4l5I)%+JaL z1m1Go9ZTK|wXu8ZJ1;^KhiFRv3QEdEQ)CJ5tmC4jQ3^WOL-U?-0n8V8eN{LV!d)kL^=)bAq}y#m@Hx z@)RF;HnA_m)(;e3xSCm%4PWqd?p(P@)+ z4>pd`*yOdSqt3-DA!>WT)cI(N`L7>o(~s>1VTW>*q;#NrQOgPs$_W|^G^#z^hJCuM z*7Yc`Qq0vAoiNX0-~WGEBZOip_(6;?W=LfmWx9Gwo;MC|MLf`{Zg zy{{v_6QEo04YRv}GG5;Bdf&v0+^voOV)ryk37&mqKGGAM3g_s%7&Fv$Oz8VSdkGiS z4+7CKNL{vziH5R?^zb#Up^QQME|ph4_ypFvbGTqDl*L$?BR>)T3G+H z7i?Vo%)PL{#$VJI&i#+}E1RH@4Y=i%IDD&o5Z9uuOuj=Nm57A4s)N4kdW|G7Ummz;#w09T%^( zB|@aH6*^lH;7N*^l&e*OQHu{k;*F|ff~g2ejV7Bf3|L(@7?h7SLK@XU!VuN(PU|Rg zvm=u{M3djnN#whA-~0=LLlTP*lR5@!8MMEwP|tbmS1Nb(W}DMIEDCqb~BN@-Eo%Ww&7d`ys7mB+#InC ztiJU{2e|xyw}Ch=LoSzUgoZvmQ*0;(CpQ5{bbSCI30T3?+-=a2Kmtz*MmJ_hq%|_J zZ^~w!td^|HW+C8OO;%-#9=V#_L1e3z9KID(Y`zm^t`?<=i(`AX{Gr2fW!+2aPZ@Ie z;@Xk5#;1R0paOYdQ9%pF1{5st#%|3bMvrA;vbDqW8RPKm=*{Z z;zqcUnX<3;)Gt|o}4sg<_=wR{sjbCpFUc2B<>xI(`TD2UuR@7Z@T?Xng2 zeYC0IVz(cJut$5fo4r5kH-4^j_&4^z`$6@RaOlB3556CwCbkW1pYJC!iLWQ@28Lm7 z*oW1@y=*|N*T%gcBzGacBKS|@?>Ig{cpN&exG@;CABy^ZkV(EY4&Y<)#HT{F6np&r zL{ZVqq&%e)1hp6uo_T@G69I_hf+z0#i6O|9q^}mU==+Jt4`45SDw>f=wbyd-yUs&{ zCk6c6n&IDcp8c&&j+y1))ghOOrJEA1^L=d7)c8$ID=By5cdnG>njt8}1iD+=Om!hs zuRSe@i7|Z>!G4p?iiHSQg4yruif{#UR{CO&V3%Nvct)@b&NKGf+;~;9u(sCbWyrT+ zfg$y-rr(^BgA{DR`-zG!T4crAokd=!+P&CHwT?x^U31P^$FhJ+s!;+#13@gnxV$Wh zMZ_)5Dq_6aeu9CC2B{Jdul&a`=CD>rN4FD@e<2=nsI^I4|Gt3v4$v1GLRt*yw zN^#qyNiQs9#<|V0XFdH*wnf9jp!d1qZ>unN(*TyEUfULJ^Xmp}y9P-z-%Ih3LL6*I z93^tJ?-ab=Znm5cd6%Bi6ZAW|-Ywh^!#qpd9?h}>_jTK=hf0@>_GO6RZAE5G(%9>5 zalU@g_GimXHaD^w#S;{08zSt%Y{+y&l1NY;xqHZM6dOPt#=N~6xP)tIqrE27;M|FD zve)&vw1aP-?Deq@+=X`Z{ZUn!b?5fP6hPUJ#W1bz8E?nqVdmUh8@JgB9ZQH9W)Loa zW^+;#Amj_IPH9JB8_5L()j|!dPHaDx9#l*mgSP^CWcUUu==(Th4wgkk^N4MIL$2@X1jc%X&LE|-f zEzg3HvLZ!(F71hQ`VWkaCX(9S)n*Tl1qFp27_x`P5@o^(+#ZfLUHxktY}{D7vH$B- 
z8?T-fi!1Gsv5+dcz}$)a(Rk(cx$Locm1}`3wDPgU*idN+cVQPIDN#W5z%P=W4q96_;7$hk zqjf~jOMlPc1#K+klQWH|*&P!=FzVoQ?orc?;MEtualgvjfH2`oA}}Q$sPP3~HWXu* z;`^Ee$MsqexB>Jo8=`4#V;=?gHVQ35ZyL{@vQ76zLdbgBqQ}2bYm2{8D?fOGG74UGe$2};AF?Pa0t`W^S;+&?y& zk`0gj8}eEB^t)O14X(fgnmSfq=YI~yXHY*#t+cUVHT{P7cpJJNvBUAcrwc1-w%&~{ z8^0EN2HQYsQsLvF2}xIUBzjGIahe^CwSk+Z)3JDPKPpnzN2A=>ukAzts~>)&!%k|L z3RagYY1yDy*2qz>{zIhQwYM}Xl{CWac1Ba9mKmk_Y$Vp*%no>H2#!!y@cXO*@wAll7LpV|<8HvI_EMr@Iizxu zMcV_H5-}uy$b)tfDC3F1M0X2+ zGaKgq1Jzo=7I(?6QEN&YcDRgHr&(1Hl|zcZOF`u?ZT8QPJ=(&j_&Zyn1%zq* zyQJUvgRRsk1R3_VcwVZ)=w%ftG}e1ct6MBcMR|cuGc6T z)DXR9P#a~2HEC3?4lX*%vJ6^FtU3-xz1F6cb1PSeb!Z6WQZ~O3?cC$uy0uf?RFUfm z_>csaqEAcVb#-tKwE<0jY)j;zAgXjX>hpEcf52VPhcs)V^*FFx3+n?%G6SwjLJA-H zhQNfo$cr#-6khJU(QBJx1E>A;uWWOm!g1pk-lEki_78unR`R)4BwKX1*?B_b;qCSS z;ReW;0y8)lbhh!E*tJI6DQH!m_Gp*7=To9(+pTv^p5SqhR)FNkYhjjbuU^*s_q8t| zQpO4ut$>heg-Eq6KmY7C?SHLb4P1LvSbSaUkMktKDzF(xw7ix5lvcH)ffs~@$d&j_ zoknul2acs4;KD|qh)!j0Mw zG~BV>?mHq3lfC=5a51C85JNWZcIRR+udQR}wc4b%?6V76#<2r@d2iaq7&|=2P$&Pb zjR{V}psS58#~ON~nsjSk30Oq4Vcowf93sa$NL^Fkp)OyU>so(wSn#ZrBh3zV;!5Ev zKMP=PTFTo^z3s&a)$NL~TiFb~@aiT!WIqpFy~+l-xGVl`&5U9x_4b8kHsCo@NM;`^ zz%_B&zKWhXS6i{>?C00Q6hkQ+btk&kG&X@MzDI$0(vx_Jz;EJH$gOO#dm5yuDHT`z zHV~jr7~<4-F=C#7_Pus6ZhFiQ&JO&b22c?rZQ}ppD&0!glRt1dNDP`a*dSK+!OKCD z<94)%E{kd1;W+U}!dmnv5bpEXc}~r^06F|?&u}*Wa)^hu752#GM8uJc2kp`LjoB0H z=;hD@W6y%Q_UqyC%R$7e$+ttWsGgP(i9%|Hcme?zZ=Q^?qVnm)%ZWz(_(gk4jZ&IU z%UzZNbsr+UXYX`<#-u>3nVN@_H3!eb%!z71`koT_Aqx@R5{rYu zJR?b5fB8OBK?>nMdpe%|cJ;f}W@&jDA5Uxef!P7sDW4~7oRhszrBx^%@QjupzWuj| zzjO6S@;J{NJ5Ol44lspAV76=--bjkY4Ru!u|bejZPR+<6-$Mgl3#Y}3T zCw*3~CGW(UhhWiw>RMo{(1c0bgukwuJxkim7ONIO_Q~z%HEIce0ojd~9^i|2>~igR zlB|gw>#-Fv1a7CKtdEXdc|cjEokVV73eRUNgixK7NUJpn+F@*?LV$QQU;YMl-9Prq z*nVr$YV{70Wd+HV6=;e5NoScovNj=!-{zA4=hTzh-f-5Efx z3EQVRk>Y}VLZ*9JOX&;JZ~N7}v%if*z~o!V7CKoW4r&rD`3Ag>gh(yQdM0={mH}fX zRo3EnD#!AgAk+b$illn#?1IO(BN{E`tL8R38vSw=uusRdQJm9)Q=T;#AdYK%*sv}; zk&WNj0lQ%*W64@^-8dyM`Hp%!9rJbBA6X9L>|4<;*GnX*+6~m>R}`Sh?go__73gYs z^u|^@7w!68<~V;jxK^)ak`^$D5_Tb=dM1*K@$67(r*ioAN^>WeWBOM!wPS9IDjv;?)7<6Abu4O^X5;T_E+|GtW+w& zLc=?;X#L)IyJR@ zjYxOj#{#33vmfH;!kUFfU^10N)Ev~-z0_(9NBFH(}sGY@@`=ql%-y|IP( zmk$z=Y(w`t?t?@m^Q54%@gD?jhHY!NN8$zb4=Dl@FJKh;P9IaN`azhx#{+PVB7N>| zHzD4^6+m;J2ngJ^_}@ZrH335}QzsdW#KmW_Wgt>;7PK;%9DQr9Gpc{X;f4cA%1UxPey+NSZIaJUX0g zxSD-=BZgUW_~{Y)=B^>7Lc&#SRJO6Q58~=Z=_39QTc|dmxeMo~B{90!*{u+$5^P!M zpsTcWPHo+ofEf6aRjY4}UtcxR*UMfzVuJFQb@8i zZd|uj_m|pLlg#3IjY4b%sm5e-?+IQD_HIaPg1wXYJ#sJTRD0Tv4KWXx zF6O^6UJ%$^uuTD(t@dzK+Z-^452pCrf^89&cuiu}Kqt2~`Vx{{ux(loV)KUo&9+BZ zG%!)l`Xw!0ExuT`Lrb2;y^=;Zo4+gV-9u@Yju9XG?rfoxAKD(ZB#hUcQW|Ya6=8hz$N)PKQ-uMC8$t)H#+;vaJERj*60uB&;~f$qn($XY z2oazpRl|WpBUUE9vgS4Rdf<}#&r|J4bV@0f?Wh1^HO_kmqiEI*xZ|--a91u{ZMPHA z%`?*U+R2y?B+obMl+d7PccYz-2iI`r?X7sgbA$Cbqs4`h?Inzyt9?WZSec6zFG`z2|^JZUX7f*@+8{pHv6N}R7 zM(VpU4r!ng71cP%BcAc*o$o_k>i0@` z)9FI7fX`&B*WH?NC9xZIYi(x0ckO?VJsqpS_vH8VtawFTbiil-%N~s)(M*rA2*m1+c0sA!@s2(G7Ifdaj2e_+hIN zvT_$=dUCplgl@7)KJc0 zBSLCjttM-5U$goE#RRwlkUj;uk2&DW(xj#h6lt>-olH^>0;Nr_iW7S5Ad!lLE@NaNUvp$VCv=(6;U!rWhE(-Oni+8>$?n68j9aD*o z-`jf48TFuTi2WM=HC^`mm;6(2>9g+Z7}i16rG-Hm1FaO5~RV z28tlN5{;VZVpp|gSnHu-&T3v$x4Is#1iK#ZlXG$uf4LzH%DBnU$J%E?I@Nf+8MFM^ zB)g?%Br-hZ5xJiWQ9_zsA$dDqM>+{U`Y!^f;e*YnXRk;%_$RyKYjS zBbLP87Oa{}DZAU-6|#P156mTa!D82d-$nR)R7W~IyK#~?)G;Co~X zG&Q2SlNO^mx_MgYb}JQ(a(6i{3m7l1nz72t)e3cfk1Y`hcklMJo)wxh3~u6YQNu=a zFC5fr)f&GJ+ib7Z=vYAfQ59L85cML?%QZR1omAxo;TDxgwG ztq+sy^Vt|Mjf1v2J9@6A{)N>CsAXOk)f(cR=L+Tywx-zh4)Xc6X!#doH%!%KT%vFL zFvzT5N@zwEGS?AUU;NYio2(GOyIeog9nkXKgVv)(anC5Mvz*qxz>z>0zi0+DzrS%R 
zvre1rjB2Y>h9OPZ^$;8bPlK{v=()upoelr0HpGC>zwno~QIqb3p+h*?q)jSWR#UOG zy3RJoIWI4_E%$dI)j!`VM2UpuEBD)Jn+8Yv?vHX_wq5IsA{&>6m-Iw|9)#B&8Ln=D z@!A1LY^PwhQerT~&0U&q3EEuSE!+t6?HI&Wc8^vD3@~8sjVJDBchK4wGprmyDH|)@ z=WNpc`12ZqI}d0+f8Ea|O#UG~ViERO{X;^f55*n^AmAU?UkC;8;8xg=SEI}DVbAx8 z_$UZo4iCxhb)jBOY^NR3yAD@CQMo%{Eyf)d8srD&SRj@g{(C$AVPc+hY)7Hj^&ngZ zCo~;gtxv{0Ny_5co=#~Mo$y}zW%~3_XFIMW{oJF!x3`2%g&R1|1U73~DEE;Y(YjW< zR*BDfRL`|Ylg=HU6K)k1c0O=J%uTK!XjI;`3xUuj&lQ0IG~2P^UqemfRnBYLH`-;b zAURwC>qe0)`b#$#hX*}i0*xLxetE_0YwC}O9>q_u$42IQ&`aEiH*ALUjGx7aP&D9i z_h`!PW=vseV$V4`x74+6Ei$GsZ0{LuK;_7?_~Wi7QjEb0N;7EV18T0WqShXXh8!@LNBtl>-9*bT%J_@h# z_PBacoXIBWcQu-uC$0t;z~Y6!{F55HxT_wo?}=A~z;c&0xWCI&F$CHnQo-lcH+kMW zOEKwcqRhF!p2uC?314Sjmmf>I9R4oD?Xb@VB8vYwI0(ZFiQv45X zrtmH2etKHda*HQ>J4--tF-73arc5Xp#9>Z6LYv&=WY0v?VzQyM*xbN$X<3=g6JCl( zu=xUs>tl;8(9rIgS{yI98IYw*)e1^JU}c&tg)DF?b%Ic?O?7e9l_=Hr@H# z3auU&2@Yv|4OLgu4TIZ9h@pM~Rcl2{E3HN&;;Q@)Rwuk_mQ~sE_?>U(n5~H4Ylzdc zl^PI!Dg|<_QlAFeJ+GzW6ZkFP%j$TQLSO6k%2f;4c^ia5U#B(2CSh^)Eo;%1q1wVP zxJ`rgwYDK!kWW6k{q%(&SwZ6#BQ>6n(8PlOTxN!rxPKIb|FB0V!)y>_Ra)EFf5!|m^sno=3X*^<-R&(cAv zTcYHAD^Q3+v1j6euw;Oa1lEm28;{1KLE06LcP_p#>NjKaVo)b`=c98tlwA-GUV#(| zG;k=xAs3CYjRg)I{|>UtLpX&K%m_9k$fmw|1eoM#0-9F?nOs$kUDKj*-H}v29uTHU zQ-34ST+GF>uG%l1&tefhqQq`$9bj#SUx8w(V^;&R^ykrTt)~XLt%Zj{voF-g4I_Nq zU+R$~JAj?;D=h*^Y?T)i*IYFj`IwbCp1M*0~f%!!iRa@uNiDmgH z5!z%%h4wujFP#UWjOm1rf>3rN==dFEPka;<{W+(6PiBL{cK6!E0F)a(nX;ij6^}L2 zYv%$1EMi1|M!!~EyZDYL2gqLJr=Qyt!FN-PO^rToX|?I`q63Wuez`)ceJxdc+@Eab zN5Q8ieiErYT2 zh)u1}&P%e<0k=W(=Xjw&ZPH?;!^VByBK#{z5MgazAXl?7rFNbWu9-npPf58sizd=y!An?}PLE!h*EDn!h`>1H?e%y^9UMENE``Q+J1P_y0f!Ph7Ohs# zTy4j-S^&Ul`$P=O2jq%{H|(S)ji)IjR6uI+TIZ8K9q`9?jQxwfrEfV=h*Y_d_=z)` z7Q2m*>;4Bel8uHQuG2<^{nGZ1d<*qZ?416vyo2VQTmk1bqzg{0!i9LBMCN39yQsDF zFA=Pe0>%QXTv1)MU5;rs;^lfp>*;~1Zh^e2XELF&yZ8=8(~89IwJdbTwV#cefBt3iOMeG%W&e|pir zj1kYiLfEc-rQs{wu=lkdo^?Ci9o@wEur%NSowCc`jRxEid?w$-fSf`P(z~a=wOp|F ztwtx!3odXR=kK%tNC(Lx^?e}Tk9o#^(5ia|LzG3-A^%_9=eD~mg^oRNEnWD%81){! 
zmWZJn5&Saaj68HLh-wvE@vweBORCp#*Aj8dHxFV@8GkKE3U=6*)&bZMf;1k9LC~0b zD3m=KgAD&>$R5-1HD1lK#~#0?bRPe|K(R}EP%ce~L5CXIohPm(Ho%$TOTV-y1*aLU zJ74X@YeA8aMEQ(6ol$@4T4tq-;?6leBX0E!;>I;8x;Q1IOH$}e4xl9*KF|6yMPrl@ zQw?RE8tbzZr2~~6oFY%xsN8B@oxK!|!LcYB{#2TyhoGF5e5U~x`O3=CYEF5m)+zmigr1!Cz5=8V&28*VZZ1BWIk1VpbmOhl5AbrPva6 zt#CKxiolF2m$g@*rwml zlGvg{Xt<5bWwk54SBO?1aFu3c=l9*x=Ll&4Qfqt}G%8i=LyTRpJ}>jb)%x=`@K zL)Q{5Uj1WRuQSU1NhP&j+Yrclc_8$Yh?^U=iSS*7wxKMhuz_vT$fRfPw9SHDN@Nxq z&@EbKOwn7T8F3(-{%vYTEMg0Ax8Gl6x3|6&PgFqlQ@%q@*Y>8G?>hrx4pO%(AQlxD z)sjOu{@%ul?}`2{6r+XNEA)~bbe~RYvdQ>jAfLvfkNqAc)&3aE1xf#Q4ro1U30Oo= zVh8mtxy2mr0lIZ4IqKqdEjDVMLF`q$-v^XUBz$$GA2k0GxAGGNA#B@?>;O{zh-fnR zjDOQU6PT)Hm3A{j2`^r|6$fFkw{H-M`}6D|v49ZY+c9y}9#Mz*gnbckDi)TZ$kmCg zBcB%u=i^PUyIX!8BT$K*PwS4F5ekD7+FdQyrN2jFKx(jWVjF64uS|9I?ggBF*MNPi z#!0+z-OG2vFY&Cs_I+#_X$b6x_~}(W?BhfwJwGewn~%^Q_&CuXGVmq4xkvpx__4$+ z60f*-ke}s;K2AgvZ)L>Whd)k~wZ0$LZd|+|{XW{K@gIkp!lM?^_0f;x z0{3q}79Ug5V;?7Sj@l>hnvll>t|t_>3F^GaZ^S3`fPJ9Po{R@rAi_K+YQz#8X-Kee zza3wG?&FYZUHRY{Dr{0r-tS@xOrg58$6Wi}@o4gcQ=rWpeJ z*d6R0EYMuNeZ64w0z?pSuJrjabIdhpp>09TghcMlq&U#de1%Fi_~OW*VrB6RSDliN zS$VcKuEt9V$kD7UiIH=wr&vYIDd)l4Ra%1He#(rw4XQd@0-W3m+%6bS<1aWKa3Q3_ zQzrlen6;N{i|SxSZAHwX)Z;iBY^9pjEUdFt@f&puHdCEP_6-$bc+0E~^g78~RIek@ zPo-a6a0S0<_&8bpw0~eWObweBJ=UUzW(C_~jJiDfosN&?RC2%Wmf!zY(3TL^MGg-(%1e${X| zL{e=Khs6sZ+ph)GElh$E#YX%OQy_K)2Gs+pic+atU@RgHVW z?bS??$J}|_g}KwVFUE6v1__Pv;Eg@g_x^ZQ-P6_8WCyYhBzFKe^1*=ZeiW{Tv_mm2 zR{>&aK;d%kB6rlQSw|1JfP`jWu3N{6qO{ke=kV(YTt_tD8W-v9XuQb{|9YJr(_R<% zcQo5^odpW1xUaTzO(%5td#P_5er+cM!(}jC$^YV%21Z7Mq(Jme3w$Nl&szfD+Q4;n zCU(iy1|!+2bLP!`mm#EaVf^JZI~M@pKA@oSo0{RRc0n^nDda9E7XuMr;6!T1Vv^ka zUNiA>JPrREX1dp|1d`57xzNAXt_lJ7!R=~JQC8Q4Yk9r{yQo&IsnN{f-?uV}LKy1U0S9w;@9i%_2y(sR)@psmDC|zWH({UY9PEYTX?!WjOhN zDxmlG)PqUxx)i9v`FN_iN%6iWIjK+1-iKqDf7DZA^2LboQ}s4g@C;wI>CrpaO{OlJ zA>8ZSHP&Wc4??I4FCoja}vW4RbzHPyw^$tpw_bezLUAVcrx0$#gk9!lxDxE+D=C+Y@V#_TY-5>KY>sJ%X&sYy3hWq z)kdtDn=RKY!|WxB`+^m&_!)TwK86hsWCns@FbTt z31{=Teh2MJociPp_Y1dLa?M{=7fLd5C0-MTH7G>wx}f80mBNi~WXH3SauLwyLHkUd zyLyM;sJEL!&ObBmlUr&|=3Sk&&owVTDZ3rNxzh-45g43CP~&_lFigRXUtitd3aV_t zLfF^Wlf+W9%SOVpKNL?nLd6hM zAO0i=@w69hocffxJTqRc@J!>_KcZJft$c`&>eWI@onk=MQ!z%Oc6(e;wSB_}8*D;6 zQ79aZm^~3g4gaRwp8O<;G$~kEL}=?LK~s`aE(q%urP++YXtA5VD8U^N!s(TBi38EoYD^(Cny$&UfTGhxfBQ-BH3{!Ny1SCRF6NcTw{v;mcJ5aw^;W1OI>a7ZsZsE5V79B`$wSaM zoX>v}Oj!wZhHrnhR&`+bkktq1`LKQs0nSb9ltB|XWJ%OpEn0g5!N?qi4mX;jYvAA@LEsy5Chj+8%hNzGSTJ3q0z|7urAoAHsp2R0{Ec za-3NWeG-&8yobp`!HHl0$!Mg(+mFq{-7UNX9>q-|c^foH?Qrgi(*Hhp6hMH z8+xbqHObMx1N4zgTWyzC4(%p4LznH&dbnX|k9GiZOAgr>)+ z;9iU09oX;eb*=GzkJ2OA=&qs&XUe0(wPJDAU)wQ(Epr2`9S=~{uc54qPRSy?zB=-R zE!fF;>oo=Jl=>}0M^EP5SmrL`=HC(qwP-+x57X<6T4KgyS|h>-_jm|hGLDbNG2)^= zk+#FL#heQq*R}L@cG&rVfa5D+2X;Y=*XgRff?W)xaunjQF%9Kv@;=Ee=qVB zu1T@KFTVH_vl~I3JTbms9S__H>O?&}r{3Iy58eoUE{=p$uzTo6-2AWzO+-+TeBnk= zCNy3Utwz`Rj#C>P>ftwF55`9u`Xtk)kMl^hxwUKGtz8@W&mO%I6ukl=?Xf_LPi!j` z|4n=RMo{>6SDsH0#)Kon)(v3uiUs!LG0~vRTu_@{U1CXTAsWBc2wP6?V?A3y0`5aIP>n~VTU2_h}pBdna zmoBZar!~)K@yeMMzp(<4jUiBh61uoP=g)|fARD{sjl{a-F8z0#8{@m@a4r5$^WqWP zK#Gu=AJfM&3)jCOVB$r_Eo8-7nH2C;s#WVIry3ZR1$+{BSh@DQfO#TsOX9_UnmWlU zgi|+36x`Ta6%A_>PioZySc>BWGE&VAsrr+Hm0L*0j$2(U@QUTOJbrcH(HjG_wi7rb z1Z%qr)aQ;Coj%Eo_xIlD$%(do*>NCihg1HfI(u28JWEZ^ z{hBAyARN^L@#MN-_=AJdu0YjEJEV4GpNAM7)`*?Rio_Gzt1)7(q}pDK#lTF%NBX)x z$z%yni6D+d%Z^5@rP>fzaWacz(UIzOq_aDIzhgvHK}WR((uKPr_GBE@|Kn$ND*8H~ zik3H!OJB5KHgCm{wT+#2CVrw-wGmCBB+4e+s3r(q;~qGtx9*kZ*$2;S8%wju!wZ2t z2~$#Pq>I`ScN8GXaV#FakO@qgT@GjsU1h1Xj$IM7hICVLKVA*|!~Nu%-ulOL$$eR_ z>ks%8lHQNt_!}{eArvv6X;~6m^m|y76LvG+=E}P4mUg;`?5UsY&u3n-+XC2xPON|U 
zl^*-zMzEj__-?xj-6JTOAU%G6fAMi3fjjs?uPZ!JV1wDGL6wNTUTY70 zny4f!MLU=BgP)Ej@w61Z(?j}QOUn3%KMm^Yf-Z`2F0gT*#ywfdrCEf_KvGYS|1^kl zWy(N7&2EiE>OYje0i$+}d*-FA>W>KqSo+7#_s^B{c=YiqQ+b>4X;AH+MlW#-)qdjB zMCyL@V|!BQl{2G>F$piIgE{f3PlK?cckz(boM5;cdWt-or1!OiyBsgrWFg(&fxg7) zW&neP`{g@TQyBXd57Xlns_AZm4`)P2s@n9m`i{&LpnQL`=XbZK)fFWb85ll|6GBf^ zaeaPk+w4z6xmD?^Q|Peg76pMh(XS5D009-yXZ@Z)ku>+yL{O_1K4`875^_) zZy!|kwdQ+%CzVQBDX*1ESym=xS&UF#@Jrl_5}?ZqO=lY zJRWQFA_5{J0wN+JZV?p`5pnY(T7VwoaR{-EF}Bz2ynToADK3UCs#@a7_ zjwSZke!n>ZOO>NtqgG&<#BI&g(pWBL*S`)c6eA13H*%%@S?lzSSLx68FWjc_8pRaH z>)hilM{c2zeW@I1tPcr#b>9CN8|{sF*A>bQ(GyLunGc8RUc6+~YJL6}o}~~ZDVnY8 z)?Pp0!={Uo~{*B7aQzuGnIp z$MTiLitfxrm)ajrQ>f6ZN3diW%2iXV*>6>-h;GS{GYG%@&q$;_2UN<556MkMs}yD) z3vx!~XH2sl-jo3ktc~U6|ftUa42;mfl*e#<(lMr3X+j?-6y&gx2&mXKe=V=e<<~B`^NtLNpLQUP9^vCp05v3 z5vR)%-~6dN33|ukR?gjk6L(GI_AR}I-W=Ss3gmg5c-*)AIbBV6${G)>4rlQ_f2fBt zYsNp|krmo)O;G(KXu+KJfp-O5Sdn@+lAy<27g0->EHsZ_a3KeMvB+rHfTP97 zmdpq5VnC^LX?HcAin*+~7SH4*?)?w(+_Jxo%Y}Gh)t#_<4ewLfgqLz`Ked^QIzL+F z&@clVzn@}>Ht~fB;Abo6V(zH@Vjijb!7c`m)X{z9=5p-bUwPzpIc$bKy}ef-Y1Z>O zd(lVg!MI(S5KprV81%^NaGjzK%k}yrb*${UxQFCye-E~>vwx2v@=Beb)tuMAVQG5D z{MgvMDUY9RU4x>{=Um_9NA$M+byIE*QHMXe;X3@)?jpw# zkGyd~`*CN$H%@=zXh$sxBNgKsM!~7fD9ba|kT|_?^W@Qwyk2?ZKVyu=$GvyFV^Pdb zq=E3Uv2X~2#ow+(;Qv&rUCO-PL>*OXcw=-GcldO!t zYj5DEIoVQL`xy^UOtF-_vtsd>YJZowa9c4=wZ_#XOjkNw40Cf58p^deIf!6GG|O_a zh0~&SydiS7m3k|WoO#_X=2%pGjpoX;w@2!3f}9tty$v68Pt;=K!nxN=EVMde16X3722fd5`udV-C^iU%wzyd6)?`JGA^_1EaWjHeO}8SX+-7@P&tJT zJ{Yd}TIt>GC7*nOmG(BeRS@n`ZpA8#ayFmh8soWK#g13m+ADYujLNcxBa}oWSbV&1 zz#Y%oXhq`zZSNYX#MpyyMkqF!-}jcizj(=FBw5Nq zIRwrlwppIu<0WC+ErO-aii%T@-J#T|%gvJPjFp7n&@MUfcq6+rpJgMS76F;7RQt;n ze`^0D-EzE`=7Eu6#o_5BzLzwkYyVlto85X{wpF^bXsb2s9HqL}Dfr}CRO_9!**r^z zjiytO$u~D%PyY!D%+1*@;k#V)$m>q|h!DOR+s5nceJkL$0;p;S^fK-4>my^fr* zasiWj0OeKzb3Ub+zek>}`8Zy%t%ij$xt%e;9_yObG*&O~9A(9$N*Uq{>fQ8t#`4ES zN2e`Xt-OEcOctXi_Axs9SR8b^Lm#z@bw|fx)X6XP&0PO#@kpl{4N9^7IQN+X3KF+? 
zHkzy?T8MKM(qiG>(f-%zO4YZBW1 zjPqv9E5}tdrE9NJ%&r%1D{mXNvxuGIkKIv)E=xO!%ElbP+_A^_>AD>oa5v`0t$J#} z-m@sS(ptuS@um%|k_YCE?OqDI#fNf=bJNC0`T{su%&1iw{hKj%lYW{nve2QrBCt7_g;rYt(Mhz^?Nn^PS+=9Qu{S|)|%-c3;C(r zpLzXzuMgH;!SNWZh%#%to8Nmq_8)({5<|cDx@6C$ z3WsxC`yQsKcpj78sXWHbo{6`WVBG92>)2jkxLn+AKqC}j*MBMk@P{A#mab0>N zM#_y{2vvB+3Epz;_Zld)pQbTB_oa<7fh}WP!aLt<*zGp&+yXPt;*3=VxMh%!F-{qe zTfN2YSGhgL@fL!egtrmSH-Ca9&}OA=^ZgR4E0mA$y%v8A74 z_3l}So@({Zjjv=(Q@Im6c4NA-%-iFbA9qv5Oe+H|l5n2IHgcBbao%PsBXIe6HD;S+ z>uP@PUhH#BB3q;G=*e7@ytjQFZE?F5p?TkXBaVOi&sd;LZgdUnLMsi2-E24)S>hj1 zW56>`yTr0R@3XZ$S!xx*{Tuc^SZ04Y9ddid%T=hgJ62=Gzdr}3T1{*8-{1IO-8n8Q z;k)xV-g2h5kKuIx+#cB~W$As6XRsz#!8|U%7V(yYq%9m$uD2vVUdPSVWF+WCiIeF_ zT5eD*d~}G1KX90 zc0WB~M=TdkCym{x{bj+pVTxUf&pQRO8NUBe6|T`jstITSkBhgQNvA8tE$ojnlw#7S zY#TF`qFec)J$frBkJR!t=Pv@#R@@6WjgBx6GMcy8`)hXHeIn zNN>b@e~)72A#VPBl*k8fX*&KfN|mlQgV|J;#bR?8)+eLfvgH{cXOmFzeQZ4CHkRG` z1~ml}a@s300QQh3jeR$O^F7?cG#cGV5rHoqHQKs!?|dEnmCZSarM4 zu?}_i_*Q@J=2`WMX7lH(X_Bo$rTWLeo`!%$KihS_O-i?m8>?uM9~weItHe%q0oP)W zt(^Uf8%VZU3plI2g$v+`yuWEZIuv%^2QzU)VK;B(K=7v0qQ)z&pWU+TIh}6amG=D& z?Xh#NYqnt1nEUHkORVxP~; zaGu*UelOmQEAc`V;l|&dmi0?{SA#da72SEPuGfNIH@N!KtxiE>P*8JN>UD51O>;&;O+~qo z#l2=Xau}>QXDnWVA&PS$$9tTK>W@49P2D2>VfAv}Gc>49jJk{L1UPINW_g+66xrXh zKZ`ku!`q5{%*d%2u6(hha~ne=EDq`;B~zuJw{lxBXPpEigIe6>4CZrg9gR{@Hk$Fw z*l0PnpQbJ|)z!zy@v}`V9ydI#eD2gbm2;>gJyuy*vzgWzlm=}nQ8RtKH zv%rOkioh*U@Qrp|^du$Vsq^x-QOg=v|GBP$DGEx{Rov`UjB$(q0n-#_18owqkD9J9 z*AITSjSW3!S`>VZ+(VsZkI3tt*+k8b{r^lVrE{$G$+RfD26L_AXen01vUw6Da9+V> zAwHf3^3UZ#>hfWsWom0Y76otUQ>JpT8%DC7a7RGhUfGh^BjrMX*E%o#^*#C6Z8!3Z zT^7_hO-687F`Ozbx2*998J8ATSY5PGduye=k(-^)k*a;oXItEbX%_JeZ{6N)k;j|Y z`Ae2!yQXi)o+laJi6%| zrz5#R8xeBU;f9HvJmgtw-0S*y^Q@W3w^%=9kqRuQUF9_Vk%A)ouH2puip}3_?xL0` z4$kg$Ua8c=Ez!Z&GJ8&Q*SRYRof>7i0&@(;El`zcJ&(lsC8qf!F6BzRgWpkAPY~E`2xIE5RW?yYv<--A0b$S@o@|Q)f+xjUZp^saNh_|Dx<1Ez5%vlT`SWp^R;?#<9J+zp)!9ji6 zNA~YMmU#UreExE9rd9;>bQwkEA^zksN{fp$#V=_mBb#|&Bm;2VU;dU9Mg z#emBCu`}?Brks0 zq%`F9r)%RkZKNJi$j;gw)O?f%gsJ&>592lx&}i+>T`L? 
zinCIIH$v)jXC!{i!Ao+&r+72u_2cnWG99XbaWi=iXqaV+UwJLB=eUHo%yIPJuE5*M zU>u!NF+Aq;pxe~om~w>qq#70Cl+F3xDk`lUDc~61L-|{bvb?zwn^Q0wZF%#yGd->3 zF^Y+*Yn$+nRhl=Xr}{k>B4W zRlpy|{sEJ%TuYor=M?#(dK6WLFjfC(bI)nAPO~DqUD4^Wobcv7Dyz@5IOaPeY_lxo z-*NZD+0>nFS>y2>o-^aKnxn+uU;F{)Do&0wKE^zYWHt?EVuAd`@tVDbmh<&ayBUit z7azSl8%r!DcM6#sr&ww+EO$enW#;H?AUU19`_r<&XAl#Ta-aI8zb(>4LmOAwoX&=xk$W2xGL2r~#&Q6_f zK%xq_gKFZNEDt=B&4Dd17`6W{L(cBd7WvLwvXDn;uf9$J&xTMou*q_QUxL zkZHbWwzp7Kd<(KIVUGA{572`0O3n>yw91mJn)S{|<(Xfn4~_iTchtguigzNQz+B(; zOfD!H4skb7oiG{2%JLUniMTdXBri@r=0r3?FTE>{CnUXW<5KHHv8-RZ{qb zYUPzHD(g_E+$8h(A5XGa`kcnQQD}qJA#0d-Hc)lQqIG+9o;ATHMeDW58_;6TZu0I{ zaL}qkx0mxOQ7Z};+c}|aQwFwt!Fhd0>;-X`j%~mVOVh1EJNqj)&8wR+-LeGS9T9Br zX>Y+AVYlqz%E@16r@8aj$IInI+56~HF+E!>xTDN`vf0@x!d=S%-x4?8yJtClkKa!s z?%O{XkY)junYKD^ygalgx`4Vvikx~Ad;@~|L$w2L7KIgj@q1L&;Zad5+k5evcSNl` zOWd7SZm`^^_}zLf9xI*?xu&rN1C|YTwloU)RR6p;n+HW>eaC0*$JX(v-luqO>3p&w znY|FZPerwUHE-^~ODis?!fsdO$A7OvP2SyQ^i;yNDr6(%vgW?~pDhpbRxeq%48JIi z4Ih8P*%;G6Se++r-8co{SHhZm&iIs%>D90rc4w8AwgJQHSNU;m@J85!!s`6iZTI#O zIct7h0o>9O7dHooZ+KHzcbt5P#rw~^n+R`&)rPUiwfsEFjDN@ zfH*3wcFEh>rA7hQ`bS&LoEo^ru`yN%UVXkE@958Bu7M?BY}iH>N4-ryXUV`_b2=_; z6Dyj~O5$C7PE~Mx*lRo!w_t)5({)kK@)CPeD<*gC`9@AM_jvZ;yeWeZeR9|vTr8$G z))eKOHvedyL0M_9*wm4RX>xEkbYZ$>*co2IO#PuQD64puV%i$dyU{GszvtYU`a|}Q zW>GjnnXAa+mb2fQ7uGP$b{pJ*g1uM}dlc+~*$^#MXgA7X-@V9^=X824mi!u=123wQ zvsJFgQcI0}3~gU6vnTNNZZ-#(X{NIg%kANPxP?ktT*hAUYkKStI2W~d(pwXBRt#2| z|4(^rz!`O3V_D$v;bX)rFb5A_pT6GwzWX~_1rE&`d8|8&_9CzxW7Ky61 zxRFz^DfW`GljDm;y^tiUEM9+$EynNhn!sesGb_#=`P^oC{++w5%*~Rw%l*S7cW{W) z(^wI??6h363GvnxcBw>s+1U;G%rCdYsf~zK^XpDwIQ6@9RqqVXN-Z+1vX}7U zisQ&s4*7+xLAGUS9yMLPgN!*A?@Vt`6S*q(^&@MMXBE03{`vvr>(3wm_)mXWfdUH? zzlDk!T+g+g&Q-MDjI5Ev$HC*v|4z7x1~iJ6bYzMcpA*` zR^9xHeklh{oTVh9()_OG@CH?SHe6Ip=F;%if98s=Wo`~t^f(5oRw6rSPqM~bwtnJm zSl3!5x#1XlS^eWZhVJvLw<5WnFn4D}g9YV*Z$3E$6lurqwP>=(Kc81awusv+xa84l zxx8NT`379mA7{vl*K=FkPUFaFJ$5LvdHe`(*wdL2&w;zs)4r*ITtwjf?3N|v9B$h? 
z$t5h4MZ1B6kUK)QlDKOTvU{d3uF5>$U9-v~EsB0Mzdk&$+;fE{ z2@fs9Tu$5IcHkaa+MIrHE6VlAK?VJMGmn9Xtym1!MdcP5?X|muEpnXYI@E5jjh@ep zy&@d*8yaBfR#(cMJa$R1b*q=9ff-ua?N)E-sqpeGd)@wjO#2pY*?6#mdgJm<7-A8* zbh#DU?R6$Rh5e>Q`E@bNJXBHaa3%!ZS&?DgH(XClWt{bROT1v+=Ts)@R+qP!tL7WF zUQeZZ)9`L}Sv&>L+T7$;_AtWIoV|`Gh7qSU|8_J+O5R1Id7QY8vK%aM7NSPmpU*eY zyvCSr8#&Nc_PXTua=c>^vQ6VhHnv+skhPwf8;p}9m4V${YrJ{oOp5LO1pDVy?`U$t z5?Sr`WhQms&=RQMo5Q;8G%khmB?w@$iX2BL#3?OomNx4tb`A7mtx`tKU5d$z9G9Pyj6u!yLOP>L#p{$ zOLLg%R_u9h)|+8}II`b_OsnLXE)mW>6^Qy;W07M;TFzlJ=Lp>DPDZX}f@`&$aOVAo zXUkj1k#CXB=Q0;{Dhia4yNH{eqyBK)n8_3?pl5;J4LVBftEAE@N3=YJMbo;fhIdAx z%<{y`#87U}-F1RINrno!@dzWWD*XGKMGH@;RDztLu!^eWk4Ch30#pucUKyxbf8yA1 z&>FqPzQLJ%;X|#p7-=WPnGCG^wXPDKMs2;l1Jo9$0Y>&(4YBX>sC|e3z9kW`dgG2U zNB>Qh4o7H-?Nv0cAXhHTIiED>Er`axI!yYqsahsuQ(DycSP&#FC( zXIPARnI(QPcDt77QDe^rg-s$}=}}Mkt~-G7YLC}V#l6P|`kMLUfgd-;8PwzTs)-zh zye^EJITM2wV9|RExWn0_ZZwJ4xPQVI@(qiCO`@}?^QQf!_Bm@CLwmgbc$-^ZWB+K5 z>$Y>MON)QX6fgbd;>FvR!;jpe_i%Znp(@V=@Q5Bw`@9LS%o@65qj8p%58TQKRb)n5 zxn{BF7-iA@-pwIL%YplyoLtI*V-OCk-?1|O?MRHZz@PAuj#Ki^gtTiY##>xVxz~XS zmZ;k;bXw^X}*Slx8peDA#-}PVnFpZXK;Y)0&^`| zjzGATGta!cCAkgTu)rKybT;w}t+X_>eF2MlG;;B_IJ`k2mRLnCUa|;FEq^rV^*`T* zy-c3i)w7$l0PkW89lH6zkxbwzWgJIYLCt92fxsF9un@v_ni+FLlv+Bj3Q!(;7 zOrBn`a`P$!-u=WmAa6{w2Tz+0-s;IVWtbyq&g`~ZY`?AtVTVa#DqY+FwbIVu%djg} zavnqQS;jt~F_+)XL#h>q`v0`j!*4QOez&jY+4>AC7|*!!YjgH3xe2Pb=eK_xhiuEX zj>}SakXl2kaeg{flSZzkSil73#fX`Rb5M+8?`*efol17r%CugYwVTsl%Q)Z?vL3DaJS19 zsZJG_#8GX%#q(~$I`=&_SUT?Pu-5_w%-tzNvk_^szq&IHE%w)$4CNAHtI~D0A-3R} zx$&HiH~&=3vo~znn(R%mJM2xMnPc3r$Kj5%(+nGK+J9QW^j^7J)?EGe54f$8aw*BJ z2VLw-l{0UtsRdEX%GS-)L+;Zbcb^PsECB-wwDk*)DYxUP70z3C#4}5m>&M(~!E>d{g5!ln!3%JC3RK>he@`{_N0sff z%~ZGkNkRU04kquQ$xHLzPb0N^Lj z&+N5`*QYLYhZP2iIaZ`0`p6A$n9TJbUJuCE!`pWq9MLqD`g9!L46)c^Dy_V4JEHD- zt2=7{rv7jzo2NF0>JJZBuwxpgKc9IDMsG#b2)gxYcN_w5TOxc6oaGFUsNZ%fi0&5j z5mutrpKW*h-f-KknU`i!n;RxC7@*U17AP!9^Rkq1?h4qcm1?VU?f7 zJC>sB-8tV>lpk@LI?l?&W{cMSEM+cmQ$Gh2EKc@uPB}Xj6D^iCtQ=ZOh}d$myFN3U zbIvAAR#9A&#%Hx3Q!GMv?uM`GROO!+8M0>;JDWwAZZ+^bS_0xz!_0^`s&H$H?iQ0x z@!pa@W<|V)ZX;K0W4We!Kjy^p;Z`%W2{1RJm0B(=C$HvNZ+^9_G0%!K|1+*ye9AXq zfeF}^xO3WEytQcOY^E{FMarPJIkv=-a5cfN%qcD}Rb4E0s+&~4wWyrkAFpv=uFzc# zd}CLLo%u(s6o2A&@K-5{cN3h(F4kD(@?M7Z-2POAe1Umt!EG9@*E35%YD=g`7UK&>cS-Qmb2IdL_`efV3H1*+?9MY1?<7L^;e#ojxo_wWg}E9#wp z`~!BFuitS8Y^Ob|#op?~I_$ENbHSLp^K2q~^30ucPCAu~g&R0lM!Fu~d$W;Y@$twC zGA+E*QgWx8k*#z;smQ@vxJbk0;nYd(r1^E^wPd!n; zg70;K())t8)=;Fn@J4N^D3;5db%{wR5mOI18Ksu;2W+nYom|s>a>g-7`bUgs-?H{+2*c>)kPI-OuR(B0lK#}q|_i8j*y>uP-dWuVE zv7kKP#&LeDO0b!o?lmPJ=f(Su|Xu0R0j1`2i)t&C}SdAX# z_M_Q|S~eDQ>k)kxpJ!wTkFBX;iMe{myPciU!2x?2)_jVm=9|U?lW20@Qe+qDp6qjb zBA>f^0oVoMg}ve&J*?ZpmtF(=mllc_YI6?_KdK0E?=Dz?pTyo__n(!^>*-sQKgTbz zI$$^XIS}=_HEyH1;nyosb)lXHH+MOs>Ywzo^D({_)syh1!n}&Z=}Hf>FrRLqaslU{ zijWpnomr#776W@YcK~-khD6n;In(Xj?!X)7feRkCB^_A~_fmx~wey^V02F$j6x~sO5FvpVL zux1nHif6Cmn>jD`IA|A_W+=YE0xQt_Yp~E>55A&&dR)igE3-(kx>K8c!ImgZr&`8W zg6BRh6}PFoOw4&fE|x1&-uZ+T=4C4QrR*KvDtfXBQoa?b{41IcHHjzcDLerQG z7icX}evuy|!=9|$%wr^(=9a(-II^Q!fAQ9ny+gg;x=4=s=_=zEbFTi#VsoBV;ka>s zgM8yP93m80B3(z=H(WuH6^;8Z-i5ry7U6sD>6WM%H1_MdjZ*WvaMAnoXDs>$lv%8E zIY2JA0&ly1ocjb7`sc1-VDbItHj_iXFHmVu@T2*tG6&j^Wp{H%(a{hJS7$93Gq`Mn z8s*q2f4bw|wXp*5&<}^Ib=E?p$OIMNwR($oKF5>Xxm2_p*6yI41Vzj0V}IQgi>H*m ztq(2s9q>K~H@K`rt0JDo@gkRqIOM^#|0yYVVN07O#T5v?wH@{`IQ3?CDE~&(w1-`U z+@SOHf80StcOL1c>R|=-bZ%M2vasGo%eR$H+7V@~cm8`esW^=;tC^8}XK}~exC`mb z!Q8c1kqbfes`PNrB4;1-g)@e9-y)zf`>l8o^+xtHlHX@9rJF+^M!ngRYv8H&;1uti zcGe?HiCwmvY6TSu@2KWEm#}J%hH2wzK!Kk}k7A9pK{T&sMAUNeDUX@P(+F^%{!_zs z9fQQuxY$sm=X+?!wWapB^E%DrkhrUb=;|KEU(X`$IL4wRmBva^_A2N!PaH 
zr95+JxD7wbv(s(hTl13zbtg!;f@`^BalBUT7fWO{tuvxGHVfr&eJft+^?D0_19-Jp zGejQqPr_^Z!?7Rxyg~95H|GPq-g`ruL2T^?OT2?)njyWKeR8du(~&pqA2%aDPR5(P z>QWabd_hw(RIXLjw8c%Y!+L+6@OY+3y%P*?S%_qsF`~P)Gb) zoNb%9cLtZ-sowog>~A7h zWO-i2YGC9aFwQckyAo+@NJ(zs%m@>dQCkn3sDGSFIfKNLdNo+#NP;^8%*|x0C8vYH zQ=Up;Cf6pXDn?o<-GphD<(WJ_i0QFEZcAaN0{rH;{{^!YlV@474ztbILU-zSPOs*= z^EV`aO!KU+q;u^NZsJ=sPa$V<6PZoX0?QF6TikkFX!XZ!26y~~@4=#2m2L2j%q&q3 zoObon-W&gmJD54=2B@PQe=N67YU8!C0gUK1aG>8!`l+AXD&vAX`97 z?Se7L75p{w1mlq}n1TYq`zR7DMscqS&Ita9YQf*2M(`Wd3bv!p!Us?<(LbTVM3)edXa|~1bOkLE{ViGr@8OzYKH3Be z(IMD~8w$ow=cYuna7*z2;I`l$bPDF7%Y3KejzlwYSFi~8%q<1?CHjByK=8lgpp3d(r0-eSjB&Ie2LiG~q{y zR^cbXB>XJ+9exq~-$0-DA$){a`t+dd@v21A@S2HQFi1q5gV*KwUocqkj~F6YiZ{&r zRlF(DhZriDieZAecuVkmyluWOVz@=VA0v#45NEEJF;W5k4x+3qPkF~= zjTkFppE*te{sH67+eJ(;$32*6v=@`i^&BQEz~5tvU?rxSn;+988ja}|a5rX3v=g%g z%P?E;zhRDGI_6pgEtuEmbzEWJK`fB#Utyuem4QX(j|N0VT@6?&Z_BaF9ILS0s2nRq zv;n;`7I6VqDa3!m8gtD>yohJk){EMjks#_ij*TWei^Nzghp|b*uhWra9`muq=opfX zO0X>!q!imNNHKPpb18NjxEWJxGA<<<9YMW_C!!imb{GMp189mBy$mf9b{#>h=<9vBCgR!R zHgi3J4x?h+Fn`&&X;gq)qV@{hHmX6VQ60KOU59YT$d9|`nuU8J9x1;sYP*aFqML{D zP;@;Vk4$z1K~dL9ghlQ3=rLI#q9$uZpXmB|JQlUpV!-@W;i>5B!+2)0Q+O_F58#DS zC0?4p68spewk-T4VOu$V7IpD@T?=v+==Ykr_Dguh=saHSw@;hu1~d zFJQ2!D+5E!qciJl!b-d;`uYflirR}X%%~Y}nZNUR+ho-k9!swhBP6`O4{_$P2qTSZ zFv^0YV6^CV3dWeM6z_=InlM&${UFAPI#Mv+C5OcAyF zFx8w3^Dxaqmt(r<>wL^K=ky}Xij^fBvtw9+Ifj|}m}{7kk9ijVAuJGe@{qAPr>0_& zIj3WZQ7)E>u9sk$sN(>Zo9k|@FxdsHjHR86RTktd)`&W0& z6r@Vnm5y{#$5~{EIxitp)OHHlqU$Nh5p`uE*IZ8^PjvqR^37i}3e5E?ibUV-MsciE z7f~W%dkIR-IRj-z*(eu%m4*sYTRr@-zYO zJnBT9O{kAmLmC<+Y~P1~=+BvGvLIDxG1r4=HA=%Z5v}dE#iFT3hlJNp;)ba6GH!~# zJd9iN*qzpb+Y)x2MyGjXvuN=Yl;Vzr?X|cox^n>cL~ZAAU-Z>(JTO@!9>y}khEc-% zXAl&9dl+F+=SB3G>qSH@J1I5jld$6$9-GH{42b?U2Tx=9zKCZMezynDMc)?Uh3Km+ zycBhw$B&ksqD=f`LfR{ou(u=~zew128hEU>F%_>oR*RT%0Iv#vXvx8Aa{qH428lYW z@w)l+6=JZ2_Zu<9BHDvD%sJJEHzjPlfT8AGjA5dV{dh}sy%=vB9E79K`5Yg1a$B!uAZjBf43Pv7#@JV4UdN0*p7R!UWM*8JHN0CIgcs z>^OwUqWkACMf6z4(XW5O&y=1TZ&Ddt&B zc~~I&<~SCLuIFHp`OCu+1rA=?ho!>q)J80m`~7+>moVZ#g%#$Wb`~q;{`E1eiUsmx zjWB%dG~x|?=doU1+b$u&gqfMxDB*XhNQ?!l!X{z2FBM60zmtb85(fRPNS5$^HMWUv zv5Ay(C@l{=Bt*PO1**Ue2|EjMQ}pE-+>&!oQxk5Ra|t?QvHQ>^ z4Ek$v$6`uL!(H=G)P#G&$hkb+R|IXnb&eWmN0VR0tU>hF9lEK-n9qMOqjYC&xO&VJ$NDa_5*ln?rB;0(IP#F zpG4m@;Ae{{6~Bl&4+2lra`K*OQCmJF5R;}C z;tgRia2RhY)YnHbRCK!m!(z_o@Rm83;cW@KQ_fLd=oZTWOdp>S)D03zT*a3xwg6 zdMp$M&+o$`lkUmK5{0^c0!t`p7g8pG6s zh!=($Phfpa8bE?DT6`WGg#$=VTU<4`mj?NJ$eAU zxv)E}8WnPXa2|dM!@m8fjJX%0N*MH4;f!G*1=R}m)|L-`uOz1Df9mC32+!aP@_v4C~7Z5*gU3YqbG*v5S4r95%ftIN@IH}>YZ89z z#~{%+XYsn|`c({;$FPs1QH#j$#~bFpA8*Fo4`8T-x6?38)RBR=MBioMZF%hG%P~BL z`!GTnN;{4?3*^H{34?x4*M+?W$1qwL&MLCOm9J7Qyfisvb zjO6EFj)gytxh5>F!aNB(^RXZnp3SNW4`Y#p;gm~Q5_7+RrE>rBIF^}vK?;@&yE&Ly z5yLZBDX(oUSS9-AB-WTSM~U$g-V0#8==xqH7@fn$SUfyCEexkLV3XXts*w~E)?$k= zTGWVSVKBh;OnGfPgzXacpXGX{Fx0#UJLTSa6uU%UpMX!!-Ki&#DvXrYB3&3Q<VK}`4*|AWS$T3XevZgSyyBc}IP}6bb8>Vq(Q*nLSiXsVvjhQI6Pq+6KFAMW(iv5we2LXN!ZJYLz^(vxCb2;Dy0xN z8nL;T;IWmV6$4iA^LQ%fP>T=G zOqhn}78ASw7ZTp9#Y+jJM=#(*Uf;8Y%)Si!#7QChbqb!5@7%gFU>LrX3hO@X4WYRPr z#tMUhLX0yHnH;Yga;7}NJowLGqOiBR8k0;~UyI3>mj+C+P(|sODvTV<#WZ1e+A&O* z*Dv!hQ`A<8S#s`e&BbhCPjdt22t#S5m@BXCd6*}j<`Fmm!D5`?`c z%dk-xK3s`Ji?srqBKWVd0a#e)I^V z9GFBE*LVBTXL;p4Frq)_V8CMXH{z+VJM93TS@7C=JQs$VuHuFH4HV#|`Ay|cx-gu= zajP(Lkxi^gef#iB%x^02)N5Fy=T6`iVYsLquRiq}-uo4JO~PQH41T`P|LHYYeL}RqppsVw!~gdvY*c!p>8eDPho`idn+$wEdWE(m*=q z2*WAOm>cs@fqBCIv|KC@M)%cVq4_x`g4( zVq^#h*dk^MgOydt7WM>+kfTsv79v;l&3WX>ImA(3z6Gj3hJu*%0E$d{axaQw9xkCo z7%eMBsW7rH1!d+T5J0)G?~orA=J)7H_=WvPD^MxyPC0-o<))($XH1xO5Y@uItA|k| z9604etuRK1gNqDOfZKAIV 
z(IMwZUM_A3g8^P3ZPMLoxMhI~a9hGBmDT7JKBm_$VYKcd?g)DeIFc3iWnaKOVdx6i zTFt|SW;_sviw@wSNwa88M~QVFLQvEeKv>Sf^C{>NhSIp&D);+m(P!?LGVoX!E-c4@ zVRi3q#9M|(bMdw?SYLVDJBY|r9MnjfxGr%vI(>HVv2;{ zT)|Wce`q;|X~Ia^UQ8E;n%L9|yHlGn%RDqUVRlS<3Uh=#0XDV5fxXdI(y*`^Rl;cXah$QtTtT(DA8SC3u=ng<)EaUURwwLd16yx? z%YA4NMh;gYApA>VF`9%uO@(MN59f-}s>JR!;+hGs97LOh?R(K7VYqlVZkYS|CfpQ8 zeW!3s*wc6nw}su*#^^NUW=5BJxUvs-gu(j5xT_TYm19~7Bfb{gH{9>X17RpF9}g`Q zTk%K2{=*drn)K2hgoWXv2K1P;rWR3QFGmo4!oD+Qcx)c(0~invu!nxC-25pY&m@ed zAHj2BxU>K-OnNvKFNLAzefUurtiOPt6u~$3_*ug4)C>4U*b^uPo_P&!BrgZA2nSBI z;8kJI)uVXrnb)+o7h;fvq2>d4-LRz=gN6M^^D#u&om!7Kggty@ZwjM<-54tDWjj7h z7z||MEv0au$I{Ge+HMSg<|+6;JIZ!%g!!#3M4T|hso_Y&=A#%TjMQdev@ptH-x%SO z{fF_6lIT2%u@Z(0s7Na8=8S#3Fv727f=Rg-F;N(-tihz1G#8TNMkE=a8uhSDmqO4yxt9&2LK!-yC5o;rZ_ z!f5eHBpB8;W1}#VmxV-O@Jb~%83wA5q#F5>TVCdMKUWci-D&%IDgD_ft z906e@vjI)!p)mz5Ce5Zos<59+9oI~Hk}9dn!kvTY5Var14LOI?58|ffumQIue3H!p zuDKWFq0^ABU6(MLvj=yCeOYz5Yk0K)_k{gyQSY1dJator;jFWGXdWu6@JJZC!u#@s z(Ol}M3J20l(IX6JP!UVmox&Y3VdUx|JXX$c*I_`y{&NL*YNb7aXA(w_a3w+5TeTN2 zgpauw`_e*jee_3R@Jb7QQUupe<7X4rm*5v+g!4}PH?PI*EoF-F<2Nrd>%v0Z%Q%V_-|T@$ibTuMmgLbYF;yUW0+w}@@b@xma7I1_|@7mj11Fp^FE0%4dv+hpOB{ppw@e0;GSQ-yt3_h6c`@J%bGOW0eo z2Qv+`8Zk>4YC4D6!v4amm?P}ldjWG}9!_GOFj#*A3lweF87wqmWdMtWk=@kkk4gPl zDvWZkcbTyNupi5X14mo1!u*~}!AfE1SUy$>qqPC75%%(Jix&=XDRsRtvM&t@!cY^n z421pM8BbImulun{!f5GHBng8x$FN22U!Fj+gpqxP*e2mEAGTYd@)qnc_twkUDI6$I z$1Y*GxCuUCHx*7&h0$|4NEh~9J%S8j|KU7j3M2lr$QJ%3JAfQvIJ*M5=AkqndBXlZ zY;a>zE+`121>6=B_Lirh*zjByN(?VHqf{8qY(bf@kE^ZaCe65n3gJLT4*bFhR~#x0 z^SCi4?5o_1Gs0+79;#JO-=?6(yw;SV)^J}5>V!}DeCve+NAl2M(u?&72*cThXcG3F z;^vsJ+gFNKVQ;}@ToVQ>xpZqDxH;Hin7R))gilh7ant->NyRN;Z~AfER(9Kupi^}F zB)UX*F5r$lM%i`VHRlt!CtlQ85vjGxVKQ2@Uvg>R~X=U$5&KC~CF2qOos;8nxg3wTY~-?|TjOqzED zuN(UJWAJmYZEZ`z5DELLctF71cW2^FVVH|sLxmyk?hK3JS-d3-R?&EoVa+8B7j~zd z#t1`BE8@&UV>(90{8F7O=C>B34O43{Mj89&1m2M_)RK;|=AP!qIALUW0OJjtN-;s9 zZl`0SgadoaF-h3loQ}!DXh9>U2t!xuFjW}Y#~m_ZPaqf5g?+VYm?;bw9>pwScUnDW z3q#zGnj`EjV2dm4xmtsH!pOM*76`+KPhg=i+K`Dw!rtmiEK$yHL3%gV5kYLi{6l@fR3;akFMlRQ3 zldzx5KS{!nFCANik1w4=viYrP#5Q5H^bEGgq=&FW7^y(~WDN!ZhpjeH5e;`JsH2C3y! zB;oa16ie8hnuZc#FVzrBg`vg@lnHydCRr{FXP-ocBDl?SR_3*tnzH6z#FJLSUM^vr z5%x45MYXX12vub*ucuKfVW_MDb;3R>?bHi{SBlUe94M(pKp1H$N0WK*A45wFFQe5w z2@Vd&~fvlTNo)%MW@`qEkT!rA*xy5 zF>EQoU16|(H}08~5!^Q^+sg;Sf%I}bG%WVxk+7$6H-bu`trX#yFbh4xpno@_!f;^$ z`h?L#)bTQ@uL=XgfulY=wbES1GYR{t=ki?Gd$|NJgpusycxiZyn`MR<%J7r0fA>E8 zEPQgF4X&_{3lRA4UW*%S%!8jtvJAPPH$)hs zBI_H%aA67FG!OgtVyLk9Y&nMgcdw!DtioFoMlYn|ZDG)V0>g!m>v;G;80O?IF6QAJ zMhbhHt1(I#tvZg;!hWv2jxoQ~(0a$DN18F#q?KndPFeWIkMR}}qUHNxP@RKyFrQ}ZZTx=9RIhBb-VK}E4n}pHx1xOP1 z`^vFJ*?pLYWC??T%h+aK>yBf)us63BJA^%fH0-oOW?+|uk-`l4gu(ONG80BisY@&D zFTRKjdn8%Nl(6qqKC*?qXAdJs?%!~TE8$-X>yamn>}Pu`4D$3wfw1p5*KrNGWl*dL z+Nhdfp)!x4RM>mD3T2i+9m*vP1$b4lu&3oH{1%Gkt`r9SY;c9$R9!eDj0Vo5TG-Rv zj2dC@VIEEp2HEG=8B$NHUKnA`HJIPi4G6@fN6{o4I8==m!_-=|3PZjEToZ;5afB<3 zoXd01Mx80prXk6}!d@d(#^*LHOik3nnVmpH5+tgpu+K zm@JHz?!y$hcjRKK3DYiOnlS7u!gOJGS|w%*Bb->z682rWg4x1>5+3{#hSN(hR~WrW z4FzHU>1r$x_GUL=p&`{%76}JR%dtclt>25K!XBOqSY{qhrDM4;SXqh{=J#?lRtkIb z*x;J$4=Gsy%g*cwH<{|&Y|X0NVV|$xp5}!qsC>1Fv|H-rupT^ zmu-Ic`H&;*XE&QG9H=Wro-lgy9P)*|HHT0jj5PAhtT0qof?{FcsSK2uU#_i}n%^Vk zC=))}osM$l{9p4?VWC&ww2>@TW7RQULE0DZzAT8`kcFxbG8VdnSv zB|H^Ic3;3VL+aE#7e@J(zYqqwNA=RA**^Rz>}kxyPbSShg`b7Jr<(DLFv8sk{130u z4N+C{6=8R323|ExDZp#O;6*Mh2z%Ley>8M=tr#o}7oNZnVKg&N z@rvLUch5xMa`Vi5Qh9%)Fr3bFx57wEAvOtnn~xz$7-eC$D1zIkkt|{GWG1$m*Mdvf zF8oVzI(7)drMt0H*muc~UBYM)&)y2VQ>d>X>^;R34#Hq%12TktO;o}a_E#1mTiDag z-7{f$_d(zi>#7`(!BxWaB8W2q7Lag(@K7%Dr4I$`hGM%0^! 
zeOYKQ>7_gbgu%=E(Iku(=b*)~su8W`q2?m437=Hgqs^q1yeC}Po!WvM_AoPXQ}oSc z+>&!-?@`M@Ky!6`N=y9$s z2>Ur5cum-M{v-woLyhP0x-e3A8iR#Tcm`sKd1!9L8^WFdRTj)|<{k_+Y&wKt!f5FQ zyrnFBdjfAu*qu^>;pVk5fDz`EXHw(j^-C%&m{%&CjuM8sx;e!5GDwF8xCTk@G&3GBw@%`h{?j>Nk672H(%#rs=2o|V45&o z#<8w2;#9-}yo;6!w?UB$n`T zZ7G&0u5X&K)Iw$YvCJ?#3(MvHXAX3Y%CS<;y)A5Sg+V^BHNsFc$GO73R*rKOs-px6 zqK8MZ(R@;=C()2=1)GGC{TGoWd~(o_Eedu0D3V40vLD;z9K6624~AT<*kM?60Xu~e zKaV^Jd!2TeuayobQm@@p< zR)o!KT`hXd{V-cwVI+m-56lCnBaaOmN-!V{U1`Eo^P6!5&kUQWkt^(Ns=x~+)BSpIlN+c;s{>-H?Il)a|vFPFifSB zK@xtq7q6R;O9_L8Jv`Dl#KLo2{D!=KeHL#@7`a@Jp~7f=8HNe_>gw^9g*x7dw}rv_ zN(@&7U-Dp?=r&tg(VgQMDUZE8mNd#@x|)a4!U6WKV=SiD3wTEu;_-&D!U(mO#tFl0 zGRF&h8Y?kD7%k(*nXvc5MNAU*T`j|8^Kg)>3MM_V8&idkFScNsusbCW(}j`!`!LhW zkb_wg_B7XEwz(fD#vEa^_#Ea6gY^xVXC8LfV}W5!1r{1+mSd6mZ3tkA;bk9|3PZGP zuuSQ8?8S0(ufL2H=I$%QO2bQ8SS5^H*o`&9zwj(x{J&{^>FB|uSTAwV&+)CWcYiB3 zDv3YuL862M6|{#SjAWlik|7m^wpb`G$R`U!We2g%JhaweyYPpW1K43XFUL*^yZ`?h z?(ZdGD2fC4_b-q2!;p|Hah6&VB4H!bEG(@wdXS2Ukcfx~hufTPLPS)Lec^k3`*1n; zVQg%7_ndopCS*Yj*%ZYRBbFjAiE(8UD`r*(s~YtAYn>DWE_2q*tOk3_h%OILuA6|f zV{91m8Zw5tJhEb3%pzw->uGEovUt28&I&2)h+(RQk|8rAd*YCfepz%cmQfK$OeOD& zUZ;qv=%-U~47o{mC`L{dM~3{WG{hlyh@0ZXYT{Ikci9+W9>9GC(PNK=ju_YpT#8{e ziLN+d>Fl*Q<)ZjTj7#jzCHl<6_r)OD!>t%O3EYd`c?A!m%N+HiiRB@|Cvj#UU|>dT zWju>x+rf+O;9VWB8uWQA_|1e`4&KF(@0brU-l}0}0#3NEAV$pkd!l<~ArME48H97T oF%`pZ8?j-JCAs2+H|SH0^EST3Alt&X=yUD%BaT_D{`cGe2dxopkpKVy diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 276f39b3b..4ccefe932 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -1,17 +1,36 @@ -function(llama_add_test source) +function(llama_build_executable source) get_filename_component(TEST_TARGET ${source} NAME_WE) add_executable(${TEST_TARGET} ${source}) install(TARGETS ${TEST_TARGET} RUNTIME) - target_link_libraries(${TEST_TARGET} PRIVATE llama) + target_link_libraries(${TEST_TARGET} PRIVATE llama common) +endfunction() + +function(llama_test_executable name source) + get_filename_component(TEST_TARGET ${source} NAME_WE) + # add_executable(${TEST_TARGET} ${source}) + # install(TARGETS ${TEST_TARGET} RUNTIME) + # target_link_libraries(${TEST_TARGET} PRIVATE llama) + add_test(NAME ${name} COMMAND $ ${ARGN}) +endfunction() + +function(llama_build_and_test_executable source) + get_filename_component(TEST_TARGET ${source} NAME_WE) + add_executable(${TEST_TARGET} ${source}) + install(TARGETS ${TEST_TARGET} RUNTIME) + target_link_libraries(${TEST_TARGET} PRIVATE llama common) add_test(NAME ${TEST_TARGET} COMMAND $ ${ARGN}) endfunction() -# llama_add_test(test-double-float.cpp) # SLOW -llama_add_test(test-quantize-fns.cpp) -llama_add_test(test-quantize-perf.cpp) -llama_add_test(test-sampling.cpp) -llama_add_test(test-tokenizer-0.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab.bin) -llama_add_test(test-grammar-parser.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../examples/grammar-parser.cpp) -llama_add_test(test-llama-grammar.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../examples/grammar-parser.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../llama.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../examples/common.cpp) -llama_add_test(test-grad0.cpp) # SLOW -# llama_add_test(test-opt.cpp) # SLOW +# llama_build_and_test_executable(test-double-float.cpp) # SLOW +llama_build_and_test_executable(test-quantize-fns.cpp) +llama_build_and_test_executable(test-quantize-perf.cpp) +llama_build_and_test_executable(test-sampling.cpp) +llama_build_executable(test-tokenizer-0.cpp) +llama_test_executable (test-tokenizer-0.llama test-tokenizer-0.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf) 
+llama_build_executable(test-tokenizer-1.cpp)
+llama_test_executable (test-tokenizer-1.llama test-tokenizer-1.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf)
+#llama_test_executable(test-tokenizer-1.aquila test-tokenizer-1.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf)
+llama_build_and_test_executable(test-grammar-parser.cpp)
+llama_build_and_test_executable(test-llama-grammar.cpp)
+llama_build_and_test_executable(test-grad0.cpp) # SLOW
+# llama_build_and_test_executable(test-opt.cpp) # SLOW
diff --git a/tests/test-grammar-parser.cpp b/tests/test-grammar-parser.cpp
index 7022988b4..a0b5b043d 100644
--- a/tests/test-grammar-parser.cpp
+++ b/tests/test-grammar-parser.cpp
@@ -3,7 +3,8 @@
 #endif
 
 #include "llama.h"
-#include "examples/grammar-parser.cpp"
+#include "grammar-parser.h"
+
 #include <cassert>
 
 int main()
diff --git a/tests/test-llama-grammar.cpp b/tests/test-llama-grammar.cpp
index 81c31e9e2..73dd33dd2 100644
--- a/tests/test-llama-grammar.cpp
+++ b/tests/test-llama-grammar.cpp
@@ -2,9 +2,9 @@
 #undef NDEBUG
 #endif
 
-#include "llama.cpp"
-#include "examples/common.cpp"
-#include "examples/grammar-parser.cpp"
+#include "llama.cpp" // TODO: not great
+#include "grammar-parser.h"
+
 #include <cassert>
 
 int main()
diff --git a/tests/test-tokenizer-0.cpp b/tests/test-tokenizer-0.cpp
index 87fde1645..81764565b 100644
--- a/tests/test-tokenizer-0.cpp
+++ b/tests/test-tokenizer-0.cpp
@@ -1,22 +1,47 @@
 #include "llama.h"
+#include "common.h"
 
 #include <cstdio>
 #include <string>
 #include <map>
 #include <vector>
 
-static const std::map<std::string, std::vector<llama_token>> & k_tests()
-{
+static std::string unescape_whitespace(llama_context* ctx, const std::vector<llama_token>& tokens) {
+    std::string result;
+    for (size_t i = 0; i < tokens.size(); ++i) {
+        result += llama_token_to_str(ctx, tokens[i]);
+    }
+    return result;
+}
+
+static const std::map<std::string, std::vector<llama_token>> & k_tests() {
     static std::map<std::string, std::vector<llama_token>> _k_tests = {
-        { "Hello World",        { 1, 10994, 2787, }, },
-        { " Hello World",       { 1, 15043, 2787, }, },
-        { " Hello World!",      { 1, 15043, 2787, 29991, }, },
-        { " this is 🦙.cpp",    { 1, 445, 338, 29871, 243, 162, 169, 156, 29889, 8223, }, },
-        { "w048 7tuijk dsdfhu", { 1, 29893, 29900, 29946, 29947, 29871, 29955, 9161, 13535, 18031, 2176, 6905, }, },
-        { "нещо на Български",  { 1, 821, 4851, 665, 1386, 29713, 1305, }, },
+        { " ",                  { 1, 259, }, },
+        { "\t",                 { 1, 29871, 12, }, },
+        { "\n",                 { 1, 29871, 13, }, },
+        { "\t\n",               { 1, 29871, 12, 13, }, },
+        { "Hello world",        { 1, 15043, 3186, }, },
+        { " Hello world",       { 1, 29871, 15043, 3186, }, },
+        { "Hello World",        { 1, 15043, 2787, }, },
+        { " Hello World",       { 1, 29871, 15043, 2787, }, },
+        { " Hello World!",      { 1, 29871, 15043, 2787, 29991, }, },
+        { " this is 🦙.cpp",    { 1, 29871, 445, 338, 29871, 243, 162, 169, 156, 29889, 8223, }, },
+        { "w048 7tuijk dsdfhu", { 1, 281, 29900, 29946, 29947, 29871, 29955, 9161, 13535, 18031, 2176, 6905, }, },
+        { "нещо на Български",  { 1, 1538, 4851, 665, 1386, 29713, 1305, }, },
+        { "កាន់តែពិសេសអាចខលចេញ", { 1, 29871, 31849, 31324, 31934, 228, 162, 142, 228, 161,
+                                    146, 228, 162, 133, 228, 161, 153, 228, 161, 186,
+                                    31708, 228, 162, 132, 31708, 228, 161, 165, 31324, 228,
+                                    161, 136, 228, 161, 132, 228, 161, 158, 228, 161,
+                                    136, 228, 162, 132, 228, 161, 140, }, },
+        { "🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token)",
+            { 1, 29871, 243, 162, 157, 131, 313, 8945, 29897, 29871,
+              243, 162, 155, 185, 30722, 243, 162, 143, 174, 30598,
+              313, 20787, 953, 3848, 275, 16125, 630, 29897, 29871, 31681,
+              313, 6194, 953, 29877, 2397, 393, 756, 967, 1914, 5993, 29897, }, },
     };
+
     return _k_tests;
-};
+}
 
 int main(int argc, char **argv) {
     if (argc < 2) {
@@ -64,10 +89,12 @@ int main(int argc, char **argv) {
         return 2;
     }
 
+    bool success = true;
+
     for (const auto & test_kv : k_tests()) {
-        std::vector<llama_token> res(test_kv.first.size());
-        const int n = llama_tokenize(ctx, test_kv.first.c_str(), res.data(), int(res.size()), true);
-        res.resize(n);
+        std::vector<llama_token> res = llama_tokenize(ctx, test_kv.first, true);
+        fprintf(stderr, "%s : '%s' tokenized to '%s'\n",
+            __func__, test_kv.first.c_str(), unescape_whitespace(ctx, res).c_str());
 
         bool correct = res.size() == test_kv.second.size();
 
@@ -78,7 +105,8 @@ int main(int argc, char **argv) {
         }
 
         if (!correct) {
-            fprintf(stderr, "%s : failed test: '%s'\n", __func__, test_kv.first.c_str());
+            fprintf(stderr, "%s : failed test:    '%s'\n", __func__, test_kv.first.c_str());
+            fprintf(stderr, "%s : detokenized to: '%s'\n", __func__, unescape_whitespace(ctx, test_kv.second).c_str());
             fprintf(stderr, "%s : expected tokens: ", __func__);
             for (const auto & t : test_kv.second) {
                 fprintf(stderr, "%6d, ", t);
@@ -90,9 +118,7 @@ int main(int argc, char **argv) {
             }
             fprintf(stderr, "\n");
 
-            llama_free_model(model);
-            llama_free(ctx);
-            return 3;
+            success = false;
         }
     }
 
@@ -101,5 +127,5 @@ int main(int argc, char **argv) {
 
     llama_backend_free();
 
-    return 0;
+    return success ? 0 : 3;
 }
diff --git a/tests/test-tokenizer-1.cpp b/tests/test-tokenizer-1.cpp
new file mode 100644
index 000000000..d8db7cd96
--- /dev/null
+++ b/tests/test-tokenizer-1.cpp
@@ -0,0 +1,131 @@
+#include "llama.h"
+#include "common.h"
+
+#include <cassert>
+#include <cstdio>
+#include <cstring>
+#include <string>
+#include <codecvt>
+#include <map>
+#include <vector>
+#include <locale>
+
+static std::string escape_whitespace(const std::string& text) {
+    std::string result;
+    bool escaping = false;
+    result += "\xe2\x96\x81";
+    for (size_t offs = 0; offs < text.length(); ++offs) {
+        if (text[offs] == ' ') {
+            if (!escaping) {
+                result += "\xe2\x96\x81";
+                escaping = true;
+            }
+        }
+        else {
+            escaping = false;
+            result += text[offs];
+        }
+    }
+    return result;
+}
+
+static std::string unescape_whitespace(llama_context * ctx, const std::vector<llama_token> & tokens) {
+    std::string result;
+    for (size_t i = 0; i < tokens.size(); ++i) {
+        result += llama_token_to_str(ctx, tokens[i]);
+    }
+    return result;
+}
+
+int main(int argc, char **argv) {
+    if (argc < 2) {
+        fprintf(stderr, "Usage: %s <vocab-file>\n", argv[0]);
+        return 1;
+    }
+
+    const std::string fname = argv[1];
+
+    fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str());
+
+    llama_model * model;
+    llama_context * ctx;
+
+    llama_backend_init(false);
+
+    // load the vocab
+    {
+        auto lparams = llama_context_default_params();
+
+        lparams.vocab_only = true;
+
+        model = llama_load_model_from_file(fname.c_str(), lparams);
+
+        if (model == NULL) {
+            fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
+            return 1;
+        }
+
+        ctx = llama_new_context_with_model(model, lparams);
+
+        if (ctx == NULL) {
+            fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
+            llama_free_model(model);
+            return 1;
+        }
+    }
+
+    const int n_vocab = llama_n_vocab(ctx);
+
+    for (int i = 0; i < n_vocab; ++i) {
+        std::string forward = llama_token_to_str_bpe(ctx, i);
+        std::vector<llama_token> tokens = llama_tokenize_bpe(ctx, forward, false);
+        if (tokens.size() == 1) {
+            if (i != tokens[0]) {
+                std::string backward = llama_token_to_str(ctx, tokens[0]);
+                fprintf(stderr, "%s : error: token %d is string %s but bpe returns token %d %s\n",
+                    __func__, i, llama_token_to_str(ctx, i).c_str(), tokens[0], backward.c_str());
+                return 2;
+            }
+        } else {
+            llama_token_type type = llama_token_get_type(ctx, i);
+            if (type == LLAMA_TOKEN_TYPE_UNKNOWN || type == LLAMA_TOKEN_TYPE_CONTROL || type == LLAMA_TOKEN_TYPE_BYTE) {
+                fprintf(stderr, "%s : info: token %d is string %s and bpe returns tokens %s\n",
+                    __func__, i, llama_token_to_str(ctx, i).c_str(), unescape_whitespace(ctx, tokens).c_str());
+            } else {
+                fprintf(stderr, "%s : error: token %d is string %s but bpe returns tokens %s\n",
+                    __func__, i, llama_token_to_str(ctx, i).c_str(), unescape_whitespace(ctx, tokens).c_str());
+                return 2;
+            }
+        }
+    }
+
+#ifdef _WIN32
+    std::wstring_convert<std::codecvt_utf8<char16_t>, char16_t> u16converter;
+    for (char16_t ch = 0x0000; ch < 0xffff; ++ch) {
+        std::u16string u16str(1, ch);
+        std::string str = u16converter.to_bytes(u16str);
+        std::vector<llama_token> tokens = llama_tokenize(ctx, escape_whitespace(str).c_str(), false);
+        if (tokens.size() == 1) {
+            fprintf(stderr, "%s : info: %s tokenized to %d \n",
+                __func__, str.c_str(), tokens[0]);
+        }
+    }
+
+    std::wstring_convert<std::codecvt_utf8<char32_t>, char32_t> u32converter;
+    for (char32_t ch = 0x0000; ch < 0x0010ffff; ++ch) {
+        std::u32string u32str(1, ch);
+        std::string str = u32converter.to_bytes(u32str);
+        std::vector<llama_token> tokens = llama_tokenize(ctx, escape_whitespace(str).c_str(), false);
+        if (tokens.size() == 1) {
+            fprintf(stderr, "%s : info: %s tokenized to %d \n", __func__, str.c_str(), tokens[0]);
+        }
+    }
+#endif
+
+    llama_free_model(model);
+    llama_free(ctx);
+
+    llama_backend_free();
+
+    return 0;
+}