From b1b6c4a22903b3b5c197692ebb5cecda938454ea Mon Sep 17 00:00:00 2001 From: Li Wu Date: Thu, 1 Aug 2019 13:17:02 +0800 Subject: [PATCH 01/53] Fix structlog dependency for app (#280) --- splunk_eventgen/lib/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/splunk_eventgen/lib/requirements.txt b/splunk_eventgen/lib/requirements.txt index 4bfb4b0b..24b66fae 100644 --- a/splunk_eventgen/lib/requirements.txt +++ b/splunk_eventgen/lib/requirements.txt @@ -1,2 +1,3 @@ ujson==1.35 jinja2==2.10.1 +structlog==19.1.0 From 3e4cb1b4d77c6136446f48002bfd3531e25a5a94 Mon Sep 17 00:00:00 2001 From: Tony Lee Date: Tue, 20 Aug 2019 10:39:14 -0700 Subject: [PATCH 02/53] zipfile fix (#284) --- splunk_eventgen/eventgen_api_server/eventgen_server_api.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py index c355cdfa..1bc0ee1d 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py @@ -464,10 +464,7 @@ def unarchive_bundle(self, path): zipf = zipfile.ZipFile(path) for info in zipf.infolist(): old_file_name = info.filename - if info.filename.find('/') == len(info.filename) - 1: - info.filename = "eg-bundle/" - else: - info.filename = "eg-bundle/" + info.filename[info.filename.find('/') + 1:] + info.filename = "eg-bundle/" + info.filename zipf.extract(info, os.path.dirname(path)) output = os.path.join(os.path.dirname(path), 'eg-bundle') zipf.close() From 9bcfdfb003c85ea5d829e988bd18a22eb0c330c8 Mon Sep 17 00:00:00 2001 From: Li Wu Date: Thu, 22 Aug 2019 11:34:59 +0800 Subject: [PATCH 03/53] Fix bug 286 random token replacement (#287) * Fix bug 286 random token replacement * Change perdayvolume generator logic to get random token value replacement --- splunk_eventgen/lib/eventgentoken.py | 2 +- .../generator/perdayvolumegenerator.py | 22 
+++++++++---------- 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/splunk_eventgen/lib/eventgentoken.py b/splunk_eventgen/lib/eventgentoken.py index 56b97ac8..e393e232 100644 --- a/splunk_eventgen/lib/eventgentoken.py +++ b/splunk_eventgen/lib/eventgentoken.py @@ -350,7 +350,7 @@ def _getReplacement(self, old=None, earliestTime=None, latestTime=None, s=None, except: logger.error("Could not parse json for '%s' in sample '%s'" % (listMatch.group(1), s.name)) return old - return random.choice(value) + return random.SystemRandom().choice(value) else: logger.error("Unknown replacement value '%s' for replacementType '%s'; will not replace" % diff --git a/splunk_eventgen/lib/plugins/generator/perdayvolumegenerator.py b/splunk_eventgen/lib/plugins/generator/perdayvolumegenerator.py index 9183208a..7b84845d 100644 --- a/splunk_eventgen/lib/plugins/generator/perdayvolumegenerator.py +++ b/splunk_eventgen/lib/plugins/generator/perdayvolumegenerator.py @@ -31,17 +31,15 @@ def gen(self, count, earliest, latest, samplename=None): # Create a counter for the current byte size of the read in samples currentSize = 0 - # Replace event tokens before calculating the size of the event - updated_sample_dict = GeneratorPlugin.replace_tokens(self, self._sample.sampleDict, earliest, latest) # If we're random, fill random events from sampleDict into eventsDict eventsDict = [] if self._sample.randomizeEvents: - sdlen = len(updated_sample_dict) + sdlen = len(self._sample.sampleDict) logger.debug("Random filling eventsDict for sample '%s' in app '%s' with %d bytes" % (self._sample.name, self._sample.app, size)) while currentSize < size: - currentevent = updated_sample_dict[random.randint(0, sdlen - 1)] + currentevent = self._sample.sampleDict[random.randint(0, sdlen - 1)] eventsDict.append(currentevent) currentSize += len(currentevent['_raw']) @@ -51,8 +49,8 @@ def gen(self, count, earliest, latest, samplename=None): "Bundlelines, filling eventsDict for sample '%s' in app '%s' 
with %d copies of sampleDict" % (self._sample.name, self._sample.app, size)) while currentSize <= size: - sizeofsample = sum(len(sample['_raw']) for sample in updated_sample_dict) - eventsDict.extend(updated_sample_dict) + sizeofsample = sum(len(sample['_raw']) for sample in self._sample.sampleDict) + eventsDict.extend(self._sample.sampleDict) currentSize += sizeofsample # Otherwise fill count events into eventsDict or keep making copies of events out of sampleDict until @@ -63,28 +61,28 @@ def gen(self, count, earliest, latest, samplename=None): # or i've read the entire file. linecount = 0 currentreadsize = 0 - linesinfile = len(updated_sample_dict) + linesinfile = len(self._sample.sampleDict) logger.debug("Lines in files: %s " % linesinfile) while currentreadsize <= size: targetline = linecount % linesinfile sizeremaining = size - currentreadsize - targetlinesize = len(updated_sample_dict[targetline]['_raw']) + targetlinesize = len(self._sample.sampleDict[targetline]['_raw']) if size < targetlinesize: logger.error( "Size is too small for sample {}. 
We need {} bytes but size of one event is {} bytes.".format( self._sample.name, size, targetlinesize)) break - if targetlinesize <= sizeremaining or targetlinesize * .9 <= sizeremaining: + if targetlinesize <= sizeremaining: currentreadsize += targetlinesize - eventsDict.append(updated_sample_dict[targetline]) + eventsDict.append(self._sample.sampleDict[targetline]) else: break linecount += 1 logger.debug("Events fill complete for sample '%s' in app '%s' length %d" % (self._sample.name, self._sample.app, len(eventsDict))) - # Ignore token replacement here because we completed it at the beginning of event generation - GeneratorPlugin.build_events(self, eventsDict, startTime, earliest, latest, ignore_tokens=True) + # build the events and replace tokens + GeneratorPlugin.build_events(self, eventsDict, startTime, earliest, latest) def load(): From fe50964980a2715a1855124e02a623130f850d01 Mon Sep 17 00:00:00 2001 From: Tony Lee Date: Mon, 26 Aug 2019 14:48:35 -0700 Subject: [PATCH 04/53] Versioning scheme (#278) --- docs/CONTRIBUTE_CODE.md | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/docs/CONTRIBUTE_CODE.md b/docs/CONTRIBUTE_CODE.md index 14c4453d..79aed24c 100644 --- a/docs/CONTRIBUTE_CODE.md +++ b/docs/CONTRIBUTE_CODE.md @@ -5,18 +5,26 @@ If you want to contribute code to eventgen, please read over the following guide ## Pull request guidelines - If you want to contribute to an eventgen repo, please use a GitHub pull request. This is the fastest way for us to evaluate your code and to merge it into the code base. Please don’t file an issue with snippets of code. Doing so means that we need to manually merge the changes in and update any appropriate tests. That decreases the likelihood that your code is going to get included in a timely manner. Please use pull requests. 
+ +## Release versioning guidelines + +Major Release — Increment the first digit by 1 if the new features break backwards compatibility/current features + +Minor Release — Increment the middle digit by 1 if the new features don’t break any existing features and are compatible with the app in it’s current state + +Patch Release — Increment the last digit by 1 if you’re publishing bug/patch fixes to your app + ### Get started If you’d like to work on a pull request and you’ve never submitted code before, follow these steps: 1. fork eventgen to your github workspace 2. If you want to fix bugs or make enhancement, please make sure there is a issue in eventgen project. Refer [this guide](FILE_ISSUES.md) to create a issue. - After that, you’re ready to start working on code. + ### Working on the code The process of submitting a pull request is fairly straightforward and generally follows the same pattern each time: @@ -75,6 +83,7 @@ The message summary should be a one-sentence description of the change, and it m **Note**: please squash you changes in one commit before firing the pull request. One commit in one PR keeps the git history clean. + #### Step 3: Rebase onto upstream Before you send the pull request, be sure to rebase onto the upstream source. This ensures your code is running on the latest available code. We prefer rebase instead of merge when upstream changes. Rebase keeps the git history clearer. @@ -83,6 +92,7 @@ git fetch upstream git rebase upstream/master ``` + #### Step 4: Run the tests The is a place holder as well. We should write about @@ -101,6 +111,7 @@ Next, push your changes to your clone: git push origin fix/issue123 ``` + #### Step 6: Submit the pull request Before creating a pull request, here are some recommended **check points**. @@ -118,7 +129,6 @@ Next, create a pull request from your branch to the eventgen develop branch. Mark @lephino , @arctan5x , @jmeixensperger , @li-wu , @GordonWang as the reviewers. 
- ## Code style and formatting tools Since Eventgen is written in python, we apply a coding style based on [PEP8](https://www.python.org/dev/peps/pep-0008/). From 6ef4255dcffcd24ee3c6e5c495a3864b64ba2dc5 Mon Sep 17 00:00:00 2001 From: Jack Meixensperger Date: Thu, 29 Aug 2019 17:56:41 -0700 Subject: [PATCH 05/53] [global] perDayVolume (#288) * exclude global from perDayVolume assignment * Address comment --- splunk_eventgen/eventgen_api_server/eventgen_server_api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py index 1bc0ee1d..9880e409 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py @@ -371,7 +371,7 @@ def set_volume(self, target_volume): stanza_num -= 1 divided_volume = float(target_volume) / stanza_num for stanza, kv_pair in conf_dict.iteritems(): - if isinstance(kv_pair, dict) and stanza != '.*' not in stanza: + if isinstance(kv_pair, dict) and stanza != 'global' and '.*' not in stanza: conf_dict[stanza]["perDayVolume"] = divided_volume self.set_conf(conf_dict) From aa48a44e8786d195fff8404e8135774c539ad0cc Mon Sep 17 00:00:00 2001 From: Li Wu Date: Fri, 30 Aug 2019 10:00:51 +0800 Subject: [PATCH 06/53] Fix security vulnerability issue (#289) --- docs/Gemfile.lock | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/docs/Gemfile.lock b/docs/Gemfile.lock index 057c847b..5d1b4e46 100644 --- a/docs/Gemfile.lock +++ b/docs/Gemfile.lock @@ -1,12 +1,12 @@ GEM remote: https://rubygems.org/ specs: - activesupport (4.2.10) + activesupport (4.2.11.1) i18n (~> 0.7) minitest (~> 5.1) thread_safe (~> 0.3, >= 0.3.4) tzinfo (~> 1.1) - addressable (2.5.2) + addressable (2.6.0) public_suffix (>= 2.0.2, < 4.0) coffee-script (2.4.1) coffee-script-source @@ -16,7 +16,7 @@ GEM commonmarker (0.17.13) 
ruby-enum (~> 0.5) concurrent-ruby (1.1.5) - dnsruby (1.61.2) + dnsruby (1.61.3) addressable (~> 2.5) em-websocket (0.5.1) eventmachine (>= 0.12.9) @@ -27,13 +27,13 @@ GEM execjs (2.7.0) faraday (0.15.4) multipart-post (>= 1.2, < 3) - ffi (1.10.0) + ffi (1.11.1) forwardable-extended (2.6.0) - gemoji (3.0.0) - github-pages (197) - activesupport (= 4.2.10) + gemoji (3.0.1) + github-pages (198) + activesupport (= 4.2.11.1) github-pages-health-check (= 1.16.1) - jekyll (= 3.7.4) + jekyll (= 3.8.5) jekyll-avatar (= 0.6.0) jekyll-coffeescript (= 1.1.1) jekyll-commonmark-ghpages (= 0.1.5) @@ -81,13 +81,13 @@ GEM octokit (~> 4.0) public_suffix (~> 3.0) typhoeus (~> 1.3) - html-pipeline (2.10.0) + html-pipeline (2.12.0) activesupport (>= 2) nokogiri (>= 1.4) http_parser.rb (0.6.0) i18n (0.9.5) concurrent-ruby (~> 1.0) - jekyll (3.7.4) + jekyll (3.8.5) addressable (~> 2.4) colorator (~> 1.0) em-websocket (~> 0.5) @@ -204,14 +204,14 @@ GEM jekyll-feed (~> 0.9) jekyll-seo-tag (~> 2.1) minitest (5.11.3) - multipart-post (2.0.0) - nokogiri (1.10.2) + multipart-post (2.1.1) + nokogiri (1.10.4) mini_portile2 (~> 2.4.0) octokit (4.14.0) sawyer (~> 0.8.0, >= 0.5.3) pathutil (0.16.2) forwardable-extended (~> 2.6) - public_suffix (3.0.3) + public_suffix (3.1.1) rb-fsevent (0.10.3) rb-inotify (0.10.0) ffi (~> 1.0) @@ -219,16 +219,16 @@ GEM ruby-enum (0.7.2) i18n ruby_dep (1.5.0) - rubyzip (1.2.2) + rubyzip (1.2.3) safe_yaml (1.0.5) - sass (3.7.3) + sass (3.7.4) sass-listen (~> 4.0.0) sass-listen (4.0.0) rb-fsevent (~> 0.9, >= 0.9.4) rb-inotify (~> 0.9, >= 0.9.7) - sawyer (0.8.1) - addressable (>= 2.3.5, < 2.6) - faraday (~> 0.8, < 1.0) + sawyer (0.8.2) + addressable (>= 2.3.5) + faraday (> 0.8, < 2.0) terminal-table (1.8.0) unicode-display_width (~> 1.1, >= 1.1.1) thread_safe (0.3.6) @@ -236,7 +236,7 @@ GEM ethon (>= 0.9.0) tzinfo (1.2.5) thread_safe (~> 0.1) - unicode-display_width (1.5.0) + unicode-display_width (1.6.0) PLATFORMS ruby @@ -245,4 +245,4 @@ DEPENDENCIES github-pages 
BUNDLED WITH - 2.0.1 + 2.0.2 From 77cfabfdc61f8d349693ec9024917a631cae7318 Mon Sep 17 00:00:00 2001 From: Li Wu Date: Fri, 30 Aug 2019 10:22:21 +0800 Subject: [PATCH 07/53] Fix custom plugin stale docs (#290) --- docs/PLUGINS.md | 90 ++++++++++++++++++++++++++----------------------- 1 file changed, 48 insertions(+), 42 deletions(-) diff --git a/docs/PLUGINS.md b/docs/PLUGINS.md index 1f42c973..5866133b 100644 --- a/docs/PLUGINS.md +++ b/docs/PLUGINS.md @@ -16,29 +16,37 @@ Plugins inherit from a base plugin class and are placed in their appropriate dir Let's take a look at the simplest plugin available to us, the Devnull output plugin: ```python -from __future__ import division from outputplugin import OutputPlugin -import sys +from logging_config import logger + class DevNullOutputPlugin(OutputPlugin): + name = 'devnull' MAXQUEUELENGTH = 1000 + useOutputQueue = True - def __init__(self, sample): - OutputPlugin.__init__(self, sample) + def __init__(self, sample, output_counter=None): + OutputPlugin.__init__(self, sample, output_counter) self.firsttime = True def flush(self, q): + logger.info('flush data to devnull') if self.firsttime: self.f = open('/dev/null', 'w') + self.firsttime = False buf = '\n'.join(x['_raw'].rstrip() for x in q) self.f.write(buf) + def load(): """Returns an instance of the plugin""" return DevNullOutputPlugin + ``` -First, we import the OutputPlugin superclass. For output plugins, they define a constant MAXQUEUELENGTH to determine the maximum amount of items in queue before forcing a queue flush. +First, we import the OutputPlugin superclass. For output plugins, they define a constant `MAXQUEUELENGTH` to determine the maximum amount of items in queue before forcing a queue flush. + +`useOutputQueue` is set to `True` here to use the output queue which functions as a reduce step when you need to maintain a single thread or a limited number of threads outputting data ``__init__()`` is very simple. 
It calls its superclass init and sets one variable, firsttime. ``flush()`` is also very simple. If it's the first time, open the file /dev/null, otherwise, output the queue by writing it to the already open file. @@ -56,26 +64,26 @@ class SplunkStreamOutputPlugin(OutputPlugin): intSettings = [ 'splunkPort' ] ``` -MAXQUEUELENGTH should look normal, but these other class variables need a little explanation. +`MAXQUEUELENGTH` should look normal, but these other class variables need a little explanation. ### Configuration Validation Config validation is a modular system in Eventgen, and plugins must be allowed to specify additional configuration parameters that the main Eventgen will consider valid and store. -*Note that eventgen.conf.spec generation is not yet automated, which means plugins must ship with the default distribution and eventgen.conf.spec must be maintained manually.* +> Note that `eventgen.conf.spec` generation is not yet automated, which means plugins must ship with the default distribution and eventgen.conf.spec must be maintained manually. Eventually spec file generation will be automated as well. The main configuration of Eventgen validates itself by a list of configuration parameters assigned by type, and each of the configuration parameters is validated by that type. 
The settings list is required: -* validSettings | Defines the list of valid settings for this plugin +* validSettings: Defines the list of valid settings for this plugin The following lists are optional and likely to be used by many plugins: -* intSettings | Will validate the settings as integers -* floatSettings | Will validate the settings as floating point numbers -* boolSettings | Will validate the settings as booleans -* jsonSettings | Will validate the settings as a JSON string -* defaultableSettings | Settings which can be specified in the [global] stanza and will pass down to individual stanzas -* complexSettings | A dictionary of lists or function callbacks, containing a setting name with list of valid options or a callback function to validate the setting. +* intSettings: Will validate the settings as integers +* floatSettings: Will validate the settings as floating point numbers +* boolSettings: Will validate the settings as booleans +* jsonSettings: Will validate the settings as a JSON string +* defaultableSettings: Settings which can be specified in the [global] stanza and will pass down to individual stanzas +* complexSettings: A dictionary of lists or function callbacks, containing a setting name with list of valid options or a callback function to validate the setting. ## Methods required per plugin type @@ -83,7 +91,7 @@ Each plugin type will define a different method required. **Plugin Type** | **Method** | **Returns** | **Notes** --- | --- | --- | --- -Rater | ``rate()`` | Integer count of events to generate | n/a +Rater | ``rate()`` | Integer count of events to generate | N/A Generator | ``gen(count, earliest, latest) `` | Success (0) | Events get put into an output queue by calling the Sample's ``send()`` or ``bulksend()`` methods in the output object. Output | ``flush(q)`` | Success (0) | Gets a deque list q to operate upon and output as configured. 
@@ -92,48 +100,46 @@ Output | ``flush(q)`` | Success (0) | Gets a deque list q to operate upon and ou We reviewed a simple Output Plugin earlier, let's look at a simple Generator Plugin: ```python -from __future__ import division +import datetime +from datetime import timedelta + from generatorplugin import GeneratorPlugin -import os -import logging -import datetime, time -import itertools -from collections import deque +from logging_config import logger + class WindbagGenerator(GeneratorPlugin): def __init__(self, sample): GeneratorPlugin.__init__(self, sample) - # Logger already setup by config, just get an instance - logger = logging.getLogger('eventgen') - globals()['logger'] = logger - - from eventgenconfig import Config - globals()['c'] = Config() - - def gen(self, count, earliest, latest): - l = [ {'_raw': '2014-01-05 23:07:08 WINDBAG Event 1 of 100000'} for i in xrange(count) ] - - self._out.bulksend(l) + def gen(self, count, earliest, latest, samplename=None): + if count < 0: + logger.warning('Sample size not found for count=-1 and generator=windbag, defaulting to count=60') + count = 60 + time_interval = timedelta.total_seconds((latest - earliest)) / count + for i in xrange(count): + current_time_object = earliest + datetime.timedelta(0, time_interval * (i + 1)) + msg = '{0} -0700 WINDBAG Event {1} of {2}'.format(current_time_object, (i + 1), count) + self._out.send(msg) return 0 + def load(): return WindbagGenerator + ``` -For this generator plugin, notice we inherit from GeneratorPlugin instead of OutputPlugin. This plugin is also quite simple. -In its ``__init__()`` method, it calls the superclass ``__init__()`` and it sets up two global variables, c, which holds the config -(and is a Singleton pattern which can be instantiated many times) and a copy of the logger which we'll use for logging in most plugins. +For this generator plugin, notice we inherit from `GeneratorPlugin` instead of `OutputPlugin`. This plugin is also quite simple. 
-Secondly, it defines a gen() method, which generates ``count`` events between ``earliest`` and ``latest`` time. In this case, we ignore the timestamp and return just event text. -Then we call bulksend. This plugin has several performance optimizations: using a list constructor instead of a loop and using bulksend instead of send. +Secondly, it defines a `gen()` method, which generates ``count`` events between ``earliest`` and ``latest`` time. In this case, we ignore the timestamp and return just event text. +Then we call `bulksend`. This plugin has several performance optimizations: using a list constructor instead of a loop and using bulksend instead of send. Let's see how this could be implemented in a slightly less performant but easier to understand way: ```python - def gen(self, count, earliest, latest): - for x in xrange(count): - self._sample.send({ '_raw': '2014-01-05 23:07:08 WINDBAG Event 1 of 100000' }) - + def gen(self, count, earliest, latest, samplename=None): + for i in xrange(count): + current_time_object = earliest + datetime.timedelta(0, time_interval * (i + 1)) + msg = '{0} -0700 WINDBAG Event {1} of {2}'.format(current_time_object, (i + 1), count) + self._out.send(msg) return 0 ``` @@ -142,4 +148,4 @@ Here, we use ``send()`` instead of ``bulksend()`` and a loop to make it easier t # Shipping a Plugin When you've developed a plugin that you want to use in your app, shipping it with your app is easy. -Place any Eventgen plugin in your Splunk app's ``bin/`` directory and we'll search for and find any plugins referenced by a ``outputMode``, ``generator`` or ``rater`` config statement. \ No newline at end of file +Place any Eventgen plugin in your Splunk app's ``bin/`` directory and we'll search for and find any plugins referenced by a ``outputMode``, ``generator`` or ``rater`` config statement. 
From f29c3320f3449cdc1ece1c6933152bbc8519fda1 Mon Sep 17 00:00:00 2001 From: Tony Lee Date: Tue, 3 Sep 2019 12:19:15 -0700 Subject: [PATCH 08/53] Server fix (#293) * Flag added * server fix for count and env clean --- .../eventgen_api_server/eventgen_controller_api.py | 2 +- .../eventgen_api_server/eventgen_server_api.py | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/splunk_eventgen/eventgen_api_server/eventgen_controller_api.py b/splunk_eventgen/eventgen_api_server/eventgen_controller_api.py index 55480909..cd0f2cd2 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_controller_api.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_controller_api.py @@ -35,7 +35,7 @@ def gather_response(response_number_target=0): if not response_number_target: response_number_target = int(self.redis_connector.message_connection.pubsub_numsub(self.redis_connector.servers_channel)[0][1]) response_num = 0 - countdown = 1.5 / self.interval + countdown = 60 / self.interval for i in range(0, int(countdown)): if response_num == response_number_target: break diff --git a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py index 9880e409..02fb882f 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py @@ -444,6 +444,11 @@ def set_bundle(self, url): def download_bundle(self, url): bundle_path = os.path.join(DEFAULT_PATH, "eg-bundle.tgz") + try: + os.remove(bundle_path) + shutil.rmtree(os.path.join(os.path.dirname(bundle_path), 'eg-bundle')) + except: + pass r = requests.get(url, stream=True) with open(bundle_path, 'wb') as f: for chunk in r.iter_content(chunk_size=None): @@ -457,9 +462,17 @@ def unarchive_bundle(self, path): output = '' if tarfile.is_tarfile(path): tar = tarfile.open(path) + foldername = '' + for name in tar.getnames(): + if '/' not in name: + foldername = name + break output = 
os.path.join(os.path.dirname(path), os.path.commonprefix(tar.getnames())) tar.extractall(path=os.path.dirname(path)) tar.close() + if foldername: + os.rename(os.path.join(os.path.dirname(path), foldername), os.path.join(os.path.dirname(path), 'eg-bundle')) + output = os.path.join(os.path.dirname(path), 'eg-bundle') elif zipfile.is_zipfile(path): zipf = zipfile.ZipFile(path) for info in zipf.infolist(): From 1720fee954bb47b5d023fede85b8307268ddb517 Mon Sep 17 00:00:00 2001 From: Li Wu Date: Thu, 19 Sep 2019 06:17:58 +0800 Subject: [PATCH 09/53] Fix bug 285 (#297) --- splunk_eventgen/lib/eventgentimer.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/splunk_eventgen/lib/eventgentimer.py b/splunk_eventgen/lib/eventgentimer.py index d500c0ba..5e3c8405 100644 --- a/splunk_eventgen/lib/eventgentimer.py +++ b/splunk_eventgen/lib/eventgentimer.py @@ -1,4 +1,5 @@ import time +import copy from Queue import Full from timeparser import timeParserTimeMath @@ -132,7 +133,10 @@ def real_run(self): break et = backfillearliest lt = timeParserTimeMath(plusminus="+", num=self.interval, unit="s", ret=et) - genPlugin = self.generatorPlugin(sample=self.sample) + copy_sample = copy.copy(self.sample) + tokens = copy.deepcopy(self.sample.tokens) + copy_sample.tokens = tokens + genPlugin = self.generatorPlugin(sample=copy_sample) # need to make sure we set the queue right if we're using multiprocessing or thread modes genPlugin.updateConfig(config=self.config, outqueue=self.outputQueue) genPlugin.updateCounts(count=count, start_time=et, end_time=lt) @@ -176,7 +180,10 @@ def real_run(self): logger.info("Starting '%d' generatorWorkers for sample '%s'" % (self.sample.config.generatorWorkers, self.sample.name)) for worker_id in range(self.config.generatorWorkers): - genPlugin = self.generatorPlugin(sample=self.sample) + copy_sample = copy.copy(self.sample) + tokens = copy.deepcopy(self.sample.tokens) + copy_sample.tokens = tokens + genPlugin = 
self.generatorPlugin(sample=copy_sample) # Adjust queue for threading mode genPlugin.updateConfig(config=self.config, outqueue=self.outputQueue) genPlugin.updateCounts(count=count, start_time=et, end_time=lt) From 906e4d92f2cc434c58031f94bbadee363d6becd8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Patrik=20Nordl=C3=A9n?= Date: Tue, 24 Sep 2019 09:20:51 +0200 Subject: [PATCH 10/53] Add syslogAddHeader config directive (#296) * Add syslog header to event in syslog mode --- docs/CONFIGURE.md | 14 ++++++- docs/REFERENCE.md | 3 ++ splunk_eventgen/lib/eventgenconfig.py | 3 +- .../lib/plugins/output/syslogout.py | 20 +++++++++- .../splunk_app/README/eventgen.conf.spec | 15 ++++++- .../plugins/test_syslog_output_with_header.py | 40 +++++++++++++++++++ .../medium_test/eventgen.conf.syslogoutput | 2 +- .../eventgen.conf.syslogoutputwithheader | 11 +++++ 8 files changed, 100 insertions(+), 8 deletions(-) create mode 100644 tests/medium/plugins/test_syslog_output_with_header.py create mode 100644 tests/sample_eventgen_conf/medium_test/eventgen.conf.syslogoutputwithheader diff --git a/docs/CONFIGURE.md b/docs/CONFIGURE.md index 14fe668e..78047bd5 100644 --- a/docs/CONFIGURE.md +++ b/docs/CONFIGURE.md @@ -382,8 +382,10 @@ specifically be supported by all plugins. Plugins that write to files like spool * Set event sourcetype in Splunk to Defaults to 'eventgen' if none specified. host = - * ONLY VALID WITH outputMode SPLUNKSTREAM - * Set event host in Splunk to . Defaults to 127.0.0.1 if none specified. + * When outputMode is splunkstream, set event host in Splunk to . + * When outputMode is syslogout and syslogAddHeader is set to true, add initial header with hostname , + see syslogAddHeader for details. + * Defaults to 127.0.0.1 if none specified. host.token = * PCRE expression used to identify the host name (or partial name) for replacement. @@ -420,6 +422,14 @@ specifically be supported by all plugins. 
Plugins that write to files like spool * Only supports UDP ports * Required + syslogAddHeader = true | false + * Controls whether syslog messages should be prefixed with an RFC3164 compliant header + including the host value defined for the sample. + * Useful in situations where you want to output generated events to syslog and make it + possible for the receiving syslog server to use the sample's defined host value instead of + the hostname of the host that eventgen is running on. + * Defaults to false + ###### tcpout tcpDestinationHost = * Defaults to 127.0.0.1 diff --git a/docs/REFERENCE.md b/docs/REFERENCE.md index 4488c48d..2c9c8a76 100644 --- a/docs/REFERENCE.md +++ b/docs/REFERENCE.md @@ -139,6 +139,9 @@ syslogDestinationPort = * Defaults to port 1514 * Only supports UDP ports +syslogAddHeader = true | false + * Defaults to false + tcpDestinationHost = * Defaults to 127.0.0.1 diff --git a/splunk_eventgen/lib/eventgenconfig.py b/splunk_eventgen/lib/eventgenconfig.py index 37af3789..fe0d4eed 100644 --- a/splunk_eventgen/lib/eventgenconfig.py +++ b/splunk_eventgen/lib/eventgenconfig.py @@ -88,6 +88,7 @@ class Config(object): 'minuteOfHourRate', 'timezone', 'dayOfMonthRate', 'monthOfYearRate', 'perDayVolume', 'outputWorkers', 'generator', 'rater', 'generatorWorkers', 'timeField', 'sampleDir', 'threading', 'profiler', 'maxIntervalsBeforeFlush', 'maxQueueLength', 'splunkMethod', 'splunkPort', + 'syslogDestinationHost', 'syslogDestinationPort', 'syslogAddHeader', 'verbosity', 'useOutputQueue', 'seed','end', 'autotimestamps', 'autotimestamp', 'httpeventWaitResponse', 'outputCounter', 'sequentialTimestamp', 'extendIndexes', 'disableLoggingQueue'] _validTokenTypes = {'token': 0, 'replacementType': 1, 'replacement': 2} @@ -99,7 +100,7 @@ class Config(object): _floatSettings = ['randomizeCount', 'delay', 'timeMultiple'] _boolSettings = [ 'disabled', 'randomizeEvents', 'bundlelines', 'profiler', 'useOutputQueue', 'autotimestamp', - 'httpeventWaitResponse', 
'outputCounter', 'sequentialTimestamp', 'disableLoggingQueue'] + 'httpeventWaitResponse', 'outputCounter', 'sequentialTimestamp', 'disableLoggingQueue', 'syslogAddHeader'] _jsonSettings = [ 'hourOfDayRate', 'dayOfWeekRate', 'minuteOfHourRate', 'dayOfMonthRate', 'monthOfYearRate', 'autotimestamps'] _defaultableSettings = [ diff --git a/splunk_eventgen/lib/plugins/output/syslogout.py b/splunk_eventgen/lib/plugins/output/syslogout.py index b1faad28..226f3bd9 100644 --- a/splunk_eventgen/lib/plugins/output/syslogout.py +++ b/splunk_eventgen/lib/plugins/output/syslogout.py @@ -9,15 +9,26 @@ loggerInitialized = {} +# This filter never returns False, because its purpose is just to add the host field so it's +# available to the logging formatter. +class HostFilter(logging.Filter): + def __init__(self, host): + self.host = host + + def filter(self, record): + record.host = self.host + return True + class SyslogOutOutputPlugin(OutputPlugin): useOutputQueue = True name = 'syslogout' MAXQUEUELENGTH = 10 - validSettings = ['syslogDestinationHost', 'syslogDestinationPort'] - defaultableSettings = ['syslogDestinationHost', 'syslogDestinationPort'] + validSettings = ['syslogDestinationHost', 'syslogDestinationPort', 'syslogAddHeader'] + defaultableSettings = ['syslogDestinationHost', 'syslogDestinationPort', 'syslogAddHeader'] intSettings = ['syslogDestinationPort'] def __init__(self, sample, output_counter=None): + syslogAddHeader = getattr(sample, 'syslogAddHeader', False) OutputPlugin.__init__(self, sample, output_counter) self._syslogDestinationHost = sample.syslogDestinationHost if hasattr( sample, 'syslogDestinationHost') and sample.syslogDestinationHost else '127.0.0.1' @@ -26,6 +37,8 @@ def __init__(self, sample, output_counter=None): loggerName = 'syslog' + sample.name self._l = logging.getLogger(loggerName) + if syslogAddHeader: + self._l.addFilter(HostFilter(host=sample.host)) self._l.setLevel(logging.INFO) global loggerInitialized @@ -34,6 +47,9 @@ def __init__(self, 
sample, output_counter=None): if loggerName not in loggerInitialized: syslogHandler = logging.handlers.SysLogHandler( address=(self._syslogDestinationHost, int(self._syslogDestinationPort))) + if syslogAddHeader: + formatter = logging.Formatter(fmt='%(asctime)s %(host)s %(message)s', datefmt='%b %d %H:%M:%S') + syslogHandler.setFormatter(formatter) self._l.addHandler(syslogHandler) loggerInitialized[loggerName] = True diff --git a/splunk_eventgen/splunk_app/README/eventgen.conf.spec b/splunk_eventgen/splunk_app/README/eventgen.conf.spec index a4b45cea..422cbd00 100644 --- a/splunk_eventgen/splunk_app/README/eventgen.conf.spec +++ b/splunk_eventgen/splunk_app/README/eventgen.conf.spec @@ -129,6 +129,14 @@ syslogDestinationPort = * Defaults to port 1514 * Only supports UDP ports +syslogAddHeader = true | false + * Controls whether syslog messages should be prefixed with an RFC3164 compliant header + including the host value defined for the sample. + * Useful in situations where you want to output generated events to syslog and make it + possible for the receiving syslog server to use the sample's defined host value instead of + the hostname of the host that eventgen is running on. + * Defaults to false + tcpDestinationHost = * Defaults to 127.0.0.1 @@ -218,8 +226,11 @@ sourcetype = * Set event sourcetype in Splunk to Defaults to 'eventgen' if none specified. host = - * ONLY VALID WITH outputMode SPLUNKSTREAM - * Set event host in Splunk to . Defaults to 127.0.0.1 if none specified. + * ONLY VALID WITH outputMode SPLUNKSTREAM and SYSLOGOUT + * When outputMode is splunkstream, set event host in Splunk to . + * When outputMode is syslogout and syslogAddHeader is set to true, add initial header with hostname , + see syslogAddHeader for details. + * Defaults to 127.0.0.1 if none specified. 
hostRegex = * ONLY VALID WITH outputMode SPLUNKSTREAM diff --git a/tests/medium/plugins/test_syslog_output_with_header.py b/tests/medium/plugins/test_syslog_output_with_header.py new file mode 100644 index 00000000..4bc69cc2 --- /dev/null +++ b/tests/medium/plugins/test_syslog_output_with_header.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python +# encoding: utf-8 + +import os +import sys + +from mock import MagicMock, patch + +from splunk_eventgen.__main__ import parse_args +from splunk_eventgen.eventgen_core import EventGenerator +from splunk_eventgen.lib.plugins.output.syslogout import SyslogOutOutputPlugin + +FILE_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class TestSyslogOutputWithHeaderPlugin(object): + def test_output_data_to_syslog_with_header(self): + configfile = "tests/sample_eventgen_conf/medium_test/eventgen.conf.syslogoutputwithheader" + testargs = ["eventgen", "generate", configfile] + with patch.object(sys, 'argv', testargs): + with patch('logging.getLogger'): + pargs = parse_args() + assert pargs.subcommand == 'generate' + assert pargs.configfile == configfile + eventgen = EventGenerator(args=pargs) + + sample = MagicMock() + sample.name = 'test' + sample.syslogDestinationHost = '127.0.0.1' + sample.syslogDestinationPort = 9999 + syslogoutput = SyslogOutOutputPlugin(sample) + + eventgen.start() + for i in xrange(1, 6): + appearance = False + for logger_call in syslogoutput._l.info.call_args_list: + if "WINDBAG Event {} of 5".format(i) in str(logger_call): + appearance = True + if not appearance: + assert False diff --git a/tests/sample_eventgen_conf/medium_test/eventgen.conf.syslogoutput b/tests/sample_eventgen_conf/medium_test/eventgen.conf.syslogoutput index 000b2396..3c71311d 100644 --- a/tests/sample_eventgen_conf/medium_test/eventgen.conf.syslogoutput +++ b/tests/sample_eventgen_conf/medium_test/eventgen.conf.syslogoutput @@ -8,4 +8,4 @@ end = 1 outputMode = syslogout syslogDestinationHost = 127.0.0.1 syslogDestinationPort = 9999 - 
+syslogAddHeader = false diff --git a/tests/sample_eventgen_conf/medium_test/eventgen.conf.syslogoutputwithheader b/tests/sample_eventgen_conf/medium_test/eventgen.conf.syslogoutputwithheader new file mode 100644 index 00000000..8ce2910a --- /dev/null +++ b/tests/sample_eventgen_conf/medium_test/eventgen.conf.syslogoutputwithheader @@ -0,0 +1,11 @@ +[windbag] +generator = windbag +earliest = -3s +latest = now +interval = 3 +count = 5 +end = 1 +outputMode = syslogout +syslogDestinationHost = 127.0.0.1 +syslogDestinationPort = 9999 +syslogAddHeader = true From 901453c34233695fd8cc2858efd15f0220e21a97 Mon Sep 17 00:00:00 2001 From: Guodong Wang Date: Wed, 25 Sep 2019 10:47:59 +0800 Subject: [PATCH 11/53] timezone setting bugfix #249 --- splunk_eventgen/lib/plugins/generator/replay.py | 2 +- splunk_eventgen/lib/timeparser.py | 2 +- tests/large/conf/eventgen_replay_csv_with_tz.conf | 11 +++++++++++ tests/large/sample/timezone.csv | 5 +++++ tests/large/test_mode_replay.py | 14 +++++++++++++- 5 files changed, 31 insertions(+), 3 deletions(-) create mode 100755 tests/large/conf/eventgen_replay_csv_with_tz.conf create mode 100644 tests/large/sample/timezone.csv diff --git a/splunk_eventgen/lib/plugins/generator/replay.py b/splunk_eventgen/lib/plugins/generator/replay.py index 2de78619..d8c68a49 100644 --- a/splunk_eventgen/lib/plugins/generator/replay.py +++ b/splunk_eventgen/lib/plugins/generator/replay.py @@ -100,7 +100,7 @@ def gen(self, count, earliest, latest, samplename=None): current_event_timestamp = self._sample.getTSFromEvent(line[self._sample.timeField]) except Exception: try: - logger.debug("Sample timeField {} failed to locate. Trying to locate _time field.".format( + logger.error("Sample timeField {} failed to locate. 
Trying to locate _time field.".format( self._sample.timeField)) current_event_timestamp = self._sample.getTSFromEvent(line["_time"]) except Exception: diff --git a/splunk_eventgen/lib/timeparser.py b/splunk_eventgen/lib/timeparser.py index 812456b3..2298a6ee 100644 --- a/splunk_eventgen/lib/timeparser.py +++ b/splunk_eventgen/lib/timeparser.py @@ -28,7 +28,7 @@ def timeParser(ts='now', timezone=datetime.timedelta(days=1), now=None, utcnow=N return now() else: if utcnow is None: - return datetime.datetime.now() + return datetime.datetime.utcnow() + timezone else: return utcnow() + timezone else: diff --git a/tests/large/conf/eventgen_replay_csv_with_tz.conf b/tests/large/conf/eventgen_replay_csv_with_tz.conf new file mode 100755 index 00000000..21ac7878 --- /dev/null +++ b/tests/large/conf/eventgen_replay_csv_with_tz.conf @@ -0,0 +1,11 @@ +[timezone] +sampleDir = ../sample +mode = replay +sampletype = csv +outputMode = stdout +timezone = -0100 +timeField = _raw + +token.0.token = \d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2},\d{3,6} +token.0.replacementType = timestamp +token.0.replacement = %Y-%m-%dT%H:%M:%S,%f diff --git a/tests/large/sample/timezone.csv b/tests/large/sample/timezone.csv new file mode 100644 index 00000000..7591d079 --- /dev/null +++ b/tests/large/sample/timezone.csv @@ -0,0 +1,5 @@ +_time,_raw,index,host,source,sourcetype +"2015-08-18 16:28:54,569","2015-08-18T16:28:54,569 INFO streams_utils:24 - utils::readAsJson:: /usr/local/bamboo/itsi-demo/local/splunk/etc/apps/splunk_app_stream/local/apps",_internal,host5.foobar.com,/usr/local/bamboo/itsi-demo/local/splunk/var/log/splunk/splunk_app_stream.log,splunk_app_stream.log +"2015-08-18 16:28:54,568","2015-08-18T16:28:54,568 INFO streams_utils:74 - create dir /usr/local/bamboo/itsi-demo/local/splunk/etc/apps/splunk_app_stream/local/",_internal,host5.foobar.com,/usr/local/bamboo/itsi-demo/local/splunk/var/log/splunk/splunk_app_stream.log,splunk_app_stream.log +"2015-08-18 16:28:52,270","2015-08-18T16:28:52,270 
ERROR pid=16324 tid=MainThread file=__init__.py:execute:957 | Execution failed: [HTTP 401] Client is not authenticated",_internal,host5.foobar.com,/usr/local/bamboo/itsi-demo/local/splunk/var/log/splunk/python_modular_input.log,python_modular_input +"2015-08-18 16:28:52,247","2015-08-18T16:28:52,247 INFO pid=16324 tid=MainThread file=__init__.py:execute:906 | Execute called",_internal,host5.foobar.com,/usr/local/bamboo/itsi-demo/local/splunk/var/log/splunk/python_modular_input.log,python_modular_input diff --git a/tests/large/test_mode_replay.py b/tests/large/test_mode_replay.py index d964101d..99821d99 100644 --- a/tests/large/test_mode_replay.py +++ b/tests/large/test_mode_replay.py @@ -1,4 +1,4 @@ -from datetime import datetime +from datetime import datetime, timedelta import re import time @@ -77,3 +77,15 @@ def test_mode_replay_csv(eventgen_test_helper): events = eventgen_test_helper('eventgen_replay_csv.conf').get_events() # assert the events equals to the sample csv file assert len(events) == 10 + + +def test_mode_replay_with_timezone(eventgen_test_helper): + """Test normal replay mode with sampletype = csv which will get _raw row from the sample""" + events = eventgen_test_helper('eventgen_replay_csv_with_tz.conf').get_events() + # assert the events equals to the sample csv file + assert len(events) == 4 + now_ts = datetime.utcnow() + timedelta(hours=-1) + for event in events: + event_ts = datetime.strptime(event.split(' ')[0], '%Y-%m-%dT%H:%M:%S,%f') + d = now_ts - event_ts + assert d.seconds < 60, 'timestamp with timezone check fails.' 
From e36b1f883d6f7da4725f71e47172d27f6d254663 Mon Sep 17 00:00:00 2001 From: Li Wu Date: Tue, 8 Oct 2019 10:57:19 +0800 Subject: [PATCH 12/53] Using multiprocess pool to address the OOM issue (#301) * Using multiprocess pool to address the OOM issue * Fix test case fail --- .../eventgen_server_api.py | 2 +- splunk_eventgen/eventgen_core.py | 110 +++++------------- splunk_eventgen/lib/eventgentimer.py | 18 ++- 3 files changed, 48 insertions(+), 82 deletions(-) diff --git a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py index 02fb882f..58ae238b 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py @@ -392,7 +392,7 @@ def stop(self, force_stop=False): response = {} if self.eventgen.eventgen_core_object.check_running(): try: - self.eventgen.eventgen_core_object.stop(force_stop=force_stop) + self.eventgen.eventgen_core_object.stop() except: pass response['message'] = "Eventgen is stopped." diff --git a/splunk_eventgen/eventgen_core.py b/splunk_eventgen/eventgen_core.py index 70a93d0a..3db57660 100644 --- a/splunk_eventgen/eventgen_core.py +++ b/splunk_eventgen/eventgen_core.py @@ -6,9 +6,9 @@ import os import sys import time -import signal from Queue import Empty, Queue from threading import Thread +import multiprocessing from lib.eventgenconfig import Config from lib.eventgenexceptions import PluginNotLoaded @@ -140,7 +140,7 @@ def _load_custom_plugins(self, PluginNotLoadedException): # APPPERF-263: be greedy when scanning plugin dir (eat all the pys) self._initializePlugins(plugindir, pluginsdict, plugintype) - def _setup_pools(self, generator_worker_count): + def _setup_pools(self, generator_worker_count=20): ''' This method is an internal method called on init to generate pools needed for processing. 
@@ -150,7 +150,8 @@ def _setup_pools(self, generator_worker_count): self._create_generator_pool() self._create_timer_threadpool() self._create_output_threadpool() - self._create_generator_workers(generator_worker_count) + if self.args.multiprocess: + self.pool = multiprocessing.Pool(generator_worker_count, maxtasksperchild=1000000) def _create_timer_threadpool(self, threadcount=100): ''' @@ -162,11 +163,13 @@ def _create_timer_threadpool(self, threadcount=100): ''' self.sampleQueue = Queue(maxsize=0) num_threads = threadcount + self.timer_thread_pool = [] for i in range(num_threads): worker = Thread(target=self._worker_do_work, args=( self.sampleQueue, self.loggingQueue, ), name="TimeThread{0}".format(i)) + self.timer_thread_pool.append(worker) worker.setDaemon(True) worker.start() @@ -185,11 +188,13 @@ def _create_output_threadpool(self, threadcount=1): else: self.outputQueue = Queue(maxsize=500) num_threads = threadcount + self.output_thread_pool = [] for i in range(num_threads): worker = Thread(target=self._worker_do_work, args=( self.outputQueue, self.loggingQueue, ), name="OutputThread{0}".format(i)) + self.output_thread_pool.append(worker) worker.setDaemon(True) worker.start() @@ -202,8 +207,7 @@ def _create_generator_pool(self, workercount=20): has over 10 generators working, additional samples won't run until the first ones end. 
:return: ''' - if self.args.multiprocess: - import multiprocessing + if self.args.multiprocess: self.manager = multiprocessing.Manager() if self.config.disableLoggingQueue: self.loggingQueue = None @@ -234,22 +238,6 @@ def _create_generator_pool(self, workercount=20): worker.setDaemon(True) worker.start() - def _create_generator_workers(self, workercount=20): - if self.args.multiprocess: - import multiprocessing - self.workerPool = [] - for worker in xrange(workercount): - # builds a list of tuples to use the map function - process = multiprocessing.Process(target=self._proc_worker_do_work, args=( - self.workerQueue, - self.loggingQueue, - self.genconfig, - )) - self.workerPool.append(process) - process.start() - else: - pass - def _setup_loggers(self, args=None): self.logger = logger self.loggingQueue = None @@ -293,37 +281,6 @@ def _generator_do_work(self, work_queue, logging_queue, output_counter=None): self.logger.exception(str(e)) raise e - @staticmethod - def _proc_worker_do_work(work_queue, logging_queue, config): - genconfig = config - stopping = genconfig['stopping'] - root = logging.getLogger() - root.setLevel(logging.DEBUG) - if logging_queue is not None: - # TODO https://github.com/splunk/eventgen/issues/217 - qh = logutils.queue.QueueHandler(logging_queue) - root.addHandler(qh) - else: - root.addHandler(logging.StreamHandler()) - while not stopping: - try: - root.info("Checking for work") - item = work_queue.get(timeout=10) - item.logger = root - item._out.updateConfig(item.config) - item.run() - work_queue.task_done() - stopping = genconfig['stopping'] - item.logger.debug("Current Worker Stopping: {0}".format(stopping)) - except Empty: - stopping = genconfig['stopping'] - except Exception as e: - root.exception(e) - raise e - else: - root.info("Stopping Process") - sys.exit(0) - def logger_thread(self, loggingQueue): while not self.stopping: try: @@ -426,8 +383,12 @@ def start(self, join_after_start=True): self.logger.info("Creating timer object 
for sample '%s' in app '%s'" % (s.name, s.app)) # This is where the timer is finally sent to a queue to be processed. Needs to move to this object. try: - t = Timer(1.0, sample=s, config=self.config, genqueue=self.workerQueue, - outputqueue=self.outputQueue, loggingqueue=self.loggingQueue) + if self.args.multiprocess: + t = Timer(1.0, sample=s, config=self.config, genqueue=self.workerQueue, + outputqueue=self.outputQueue, loggingqueue=self.loggingQueue, pool=self.pool) + else: + t = Timer(1.0, sample=s, config=self.config, genqueue=self.workerQueue, + outputqueue=self.outputQueue, loggingqueue=self.loggingQueue) except PluginNotLoaded as pnl: self._load_custom_plugins(pnl) t = Timer(1.0, sample=s, config=self.config, genqueue=self.workerQueue, @@ -460,6 +421,12 @@ def stop(self, force_stop=False): self.stopping = True self.force_stop = force_stop + # join timer thread and output thread + for output_thread in self.output_thread_pool: + output_thread.join() + for timer_thread in self.timer_thread_pool: + timer_thread.join() + self.logger.info("All timers exited, joining generation queue until it's empty.") if force_stop: self.logger.info("Forcibly stopping Eventgen: Deleting workerQueue.") @@ -472,18 +439,9 @@ def stop(self, force_stop=False): self.kill_processes() else: self.genconfig["stopping"] = True - for worker in self.workerPool: - count = 0 - # We wait for a minute until terminating the worker - while worker.exitcode is None and count != 20: - if count == 30: - self.logger.info("Terminating worker {0}".format(worker._name)) - worker.terminate() - count = 0 - break - self.logger.info("Worker {0} still working, waiting for it to finish.".format(worker._name)) - time.sleep(2) - count += 1 + self.pool.close() + self.pool.join() + self.logger.info("All generators working/exited, joining output queue until it's empty.") if not self.args.multiprocess and not force_stop: self.outputQueue.join() @@ -531,17 +489,13 @@ def check_done(self): :return: if eventgen jobs are 
finished, return True else False ''' - return self.sampleQueue.empty() and self.sampleQueue.unfinished_tasks <= 0 and self.workerQueue.empty() and self.workerQueue.unfinished_tasks <= 0 + return self.sampleQueue.empty() and self.sampleQueue.unfinished_tasks <= 0 and \ + self.workerQueue.empty() and self.workerQueue.unfinished_tasks <= 0 def kill_processes(self): - try: - if self.args.multiprocess: - for worker in self.workerPool: - try: os.kill(int(worker.pid), signal.SIGKILL) - except: continue - del self.outputQueue - self.manager.shutdown() - except: - pass - - \ No newline at end of file + if self.args.multiprocess and hasattr(self, "pool"): + self.pool.close() + self.pool.terminate() + self.pool.join() + del self.outputQueue + self.manager.shutdown() diff --git a/splunk_eventgen/lib/eventgentimer.py b/splunk_eventgen/lib/eventgentimer.py index 5e3c8405..b802d9c9 100644 --- a/splunk_eventgen/lib/eventgentimer.py +++ b/splunk_eventgen/lib/eventgentimer.py @@ -26,7 +26,7 @@ class Timer(object): countdown = None # Added by CS 5/7/12 to emulate threading.Timer - def __init__(self, time, sample=None, config=None, genqueue=None, outputqueue=None, loggingqueue=None): + def __init__(self, time, sample=None, config=None, genqueue=None, outputqueue=None, loggingqueue=None, pool=None): # Logger already setup by config, just get an instance # setup default options self.profiler = config.profiler @@ -36,6 +36,7 @@ def __init__(self, time, sample=None, config=None, genqueue=None, outputqueue=No self.endts = getattr(self.sample, "endts", None) self.generatorQueue = genqueue self.outputQueue = outputqueue + self.pool = pool self.time = time self.stopping = False self.countdown = 0 @@ -141,7 +142,10 @@ def real_run(self): genPlugin.updateConfig(config=self.config, outqueue=self.outputQueue) genPlugin.updateCounts(count=count, start_time=et, end_time=lt) try: - self.generatorQueue.put(genPlugin, True, 3) + if self.pool is not None: + self.pool.apply_async(run_task, 
args=(genPlugin,)) + else: + self.generatorQueue.put(genPlugin, True, 3) self.executions += 1 backfillearliest = lt except Full: @@ -189,7 +193,11 @@ def real_run(self): genPlugin.updateCounts(count=count, start_time=et, end_time=lt) try: - self.generatorQueue.put(genPlugin) + if self.pool is not None: + self.pool.apply_async(run_task, args=(genPlugin,)) + else: + self.generatorQueue.put(genPlugin) + logger.debug(("Worker# {0}: Put {1} MB of events in queue for sample '{2}'" + "with et '{3}' and lt '{4}'").format( worker_id, round((count / 1024.0 / 1024), 4), @@ -231,3 +239,7 @@ def real_run(self): else: time.sleep(self.time) self.countdown -= self.time + + +def run_task(generator_plugin): + generator_plugin.run() From 48c25c41d82467f987a2130047252ca55a8b217c Mon Sep 17 00:00:00 2001 From: Jack Meixensperger Date: Tue, 8 Oct 2019 00:12:21 -0700 Subject: [PATCH 13/53] Remove workerQueue unfinished tasks (#302) --- splunk_eventgen/eventgen_core.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/splunk_eventgen/eventgen_core.py b/splunk_eventgen/eventgen_core.py index 3db57660..b5c8cf6d 100644 --- a/splunk_eventgen/eventgen_core.py +++ b/splunk_eventgen/eventgen_core.py @@ -489,8 +489,7 @@ def check_done(self): :return: if eventgen jobs are finished, return True else False ''' - return self.sampleQueue.empty() and self.sampleQueue.unfinished_tasks <= 0 and \ - self.workerQueue.empty() and self.workerQueue.unfinished_tasks <= 0 + return self.sampleQueue.empty() and self.sampleQueue.unfinished_tasks <= 0 and self.workerQueue.empty() def kill_processes(self): if self.args.multiprocess and hasattr(self, "pool"): From 0b8e3f8c18850f19393aca02b57e0df827a6ecce Mon Sep 17 00:00:00 2001 From: Lynch Wu Date: Tue, 8 Oct 2019 16:04:30 +0800 Subject: [PATCH 14/53] Bumped version to 6.5.2 --- splunk_eventgen/version.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/splunk_eventgen/version.json b/splunk_eventgen/version.json index 
ac6c6837..d04847bd 100644 --- a/splunk_eventgen/version.json +++ b/splunk_eventgen/version.json @@ -1 +1 @@ -{"version": "6.5.0"} +{"version": "6.5.2"} From 684d1f48bce503484dae026d9578346a07a83639 Mon Sep 17 00:00:00 2001 From: Tony Lee Date: Wed, 9 Oct 2019 14:29:29 -0700 Subject: [PATCH 15/53] controller fix (#304) * controller fix --- requirements.txt | 3 +- .../eventgen_controller_api.py | 191 ++++++------------ .../eventgen_server_api.py | 28 +-- 3 files changed, 83 insertions(+), 139 deletions(-) diff --git a/requirements.txt b/requirements.txt index e64a108c..7cb24a57 100644 --- a/requirements.txt +++ b/requirements.txt @@ -22,4 +22,5 @@ yapf>=0.26.0 isort>=4.3.15 Flask>=1.0.3 redis==3.2.1 -structlog==19.1.0 \ No newline at end of file +structlog==19.1.0 +uuid \ No newline at end of file diff --git a/splunk_eventgen/eventgen_api_server/eventgen_controller_api.py b/splunk_eventgen/eventgen_api_server/eventgen_controller_api.py index cd0f2cd2..e16be6a6 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_controller_api.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_controller_api.py @@ -6,6 +6,7 @@ import json import requests import logging +import uuid INTERNAL_ERROR_RESPONSE = json.dumps({"message": "Internal Error Occurred"}) @@ -20,33 +21,47 @@ def __init__(self, redis_connector, host): self.logger.info("Initialized the EventgenControllerAPI Blueprint") self.interval = 0.001 + + self.server_responses = {} def get_blueprint(self): return self.bp def __create_blueprint(self): bp = Blueprint('api', __name__) - - def format_message(job, request_method, body=None, target='all'): - return json.dumps({'job': job, 'target': target, 'body': body, 'request_method': request_method}) - def gather_response(response_number_target=0): - response = {} + def publish_message(job, request_method, body=None, target="all"): + message_uuid = str(uuid.uuid4()) + formatted_message = json.dumps({'job': job, 'target': target, 'body': body, 'request_method': 
request_method, 'message_uuid': message_uuid}) + self.redis_connector.message_connection.publish(self.redis_connector.servers_channel, formatted_message) + self.logger.info("Published {}".format(formatted_message)) + return message_uuid + + def gather_response(target_job, message_uuid, response_number_target=0): if not response_number_target: response_number_target = int(self.redis_connector.message_connection.pubsub_numsub(self.redis_connector.servers_channel)[0][1]) - response_num = 0 - countdown = 60 / self.interval - for i in range(0, int(countdown)): + if target_job == 'bundle': + countdown = 120 + elif target_job == 'status': + countdown = 15 + else: + countdown = 5 + for i in range(0, int(countdown / self.interval)): + response_num = len(self.server_responses.get(message_uuid, {}).keys()) if response_num == response_number_target: break else: time.sleep(self.interval) message = self.redis_connector.pubsub.get_message() if message and type(message.get('data')) == str: - status_response = json.loads(message.get('data')) - response[status_response['host']] = status_response['response'] - response_num += 1 - return response + server_response = json.loads(message.get('data')) + self.logger.info(server_response) + message_uuid = server_response.get('message_uuid') + if message_uuid: + if message_uuid not in self.server_responses: + self.server_responses[message_uuid] = {} + self.server_responses[message_uuid][server_response['host']] = server_response['response'] + return self.server_responses.get(message_uuid, {}) @bp.route('/index', methods=['GET']) def index(): @@ -56,169 +71,95 @@ def index(): You are running Eventgen Controller.\n''' host = self.host return home_page.format(host, self.redis_connector.get_registered_servers()) - - @bp.route('/status', methods=['GET']) - def http_all_status(): - try: - self.redis_connector.message_connection.publish(self.redis_connector.servers_channel, format_message('status', request.method, target='all')) - return 
Response(json.dumps(gather_response()), mimetype='application/json', status=200) - except Exception as e: - self.logger.error(e) - return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500) + @bp.route('/status', methods=['GET'], defaults={'target': 'all'}) @bp.route('/status/', methods=['GET']) - def http_target_status(target): + def http_status(target): try: - self.redis_connector.message_connection.publish(self.redis_connector.servers_channel, format_message('status', request.method, target=target)) - return Response(json.dumps(gather_response(response_number_target=1)), mimetype='application/json', status=200) - except Exception as e: - self.logger.error(e) - return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500) - - @bp.route('/conf', methods=['GET', 'POST', 'PUT']) - def http_all_conf(): - try: - body = None if request.method == 'GET' else request.get_json(force=True) - self.redis_connector.message_connection.publish(self.redis_connector.servers_channel, format_message('conf', request.method, body=body, target='all')) - return Response(json.dumps(gather_response()), mimetype='application/json', status=200) + message_uuid = publish_message('status', request.method, target=target) + return Response(json.dumps(gather_response('status', message_uuid=message_uuid, response_number_target=0 if target == 'all' else 1)), mimetype='application/json', status=200) except Exception as e: self.logger.error(e) return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500) - + + @bp.route('/conf', methods=['GET', 'POST', 'PUT'], defaults={'target': 'all'}) @bp.route('/conf/', methods=['GET', 'POST', 'PUT']) - def http_target_conf(target): + def http_conf(target): try: body = None if request.method == 'GET' else request.get_json(force=True) - self.redis_connector.message_connection.publish(self.redis_connector.servers_channel, format_message('conf', request.method, body=body, target=target)) - return 
Response(json.dumps(gather_response(response_number_target=1)), mimetype='application/json', status=200) - except Exception as e: - self.logger.error(e) - return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500) - - @bp.route('/bundle', methods=['POST']) - def http_all_bundle(): - try: - self.redis_connector.message_connection.publish(self.redis_connector.servers_channel, format_message('bundle', request.method, body=request.get_json(force=True), target='all')) - return Response(json.dumps(gather_response()), mimetype='application/json', status=200) + message_uuid = publish_message('conf', request.method, body=body, target=target) + return Response(json.dumps(gather_response('conf', message_uuid=message_uuid, response_number_target=0 if target == 'all' else 1)), mimetype='application/json', status=200) except Exception as e: self.logger.error(e) return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500) - + + @bp.route('/bundle', methods=['POST'], defaults={'target': 'all'}) @bp.route('/bundle/', methods=['POST']) - def http_target_bundle(target): - try: - self.redis_connector.message_connection.publish(self.redis_connector.servers_channel, format_message('bundle', request.method, body=request.get_json(force=True), target=target)) - return Response(json.dumps(gather_response(response_number_target=1)), mimetype='application/json', status=200) - except Exception as e: - self.logger.error(e) - return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500) - - @bp.route('/setup', methods=['POST']) - def http_all_setup(): + def http_bundle(target): try: - self.redis_connector.message_connection.publish(self.redis_connector.servers_channel, format_message('setup', request.method, body=request.get_json(force=True), target='all')) - return Response(json.dumps(gather_response()), mimetype='application/json', status=200) + message_uuid = publish_message('bundle', request.method, 
body=request.get_json(force=True), target=target) + return Response(json.dumps(gather_response('bundle', message_uuid=message_uuid, response_number_target=0 if target == 'all' else 1)), mimetype='application/json', status=200) except Exception as e: self.logger.error(e) return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500) + @bp.route('/setup', methods=['POST'], defaults={'target': 'all'}) @bp.route('/setup/', methods=['POST']) - def http_target_setup(target): + def http_setup(target): try: - self.redis_connector.message_connection.publish(self.redis_connector.servers_channel, format_message('setup', request.method, body=request.get_json(force=True), target=target)) - return Response(json.dumps(gather_response(response_number_target=1)), mimetype='application/json', status=200) - except Exception as e: - self.logger.error(e) - return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500) - - @bp.route('/volume', methods=['GET', 'POST']) - def http_all_volume(): - try: - body = None if request.method == 'GET' else request.get_json(force=True) - self.redis_connector.message_connection.publish(self.redis_connector.servers_channel, format_message('volume', request.method, body=body, target='all')) - return Response(json.dumps(gather_response()), mimetype='application/json', status=200) + message_uuid = publish_message('setup', request.method, body=request.get_json(force=True), target=target) + return Response(json.dumps(gather_response('setup', message_uuid=message_uuid, response_number_target=0 if target == 'all' else 1)), mimetype='application/json', status=200) except Exception as e: self.logger.error(e) return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500) + @bp.route('/volume', methods=['GET', 'POST'], defaults={'target': 'all'}) @bp.route('/volume/', methods=['GET', 'POST']) - def http_target_volume(target): + def http_volume(target): try: body = None if request.method == 'GET' else 
request.get_json(force=True) - self.redis_connector.message_connection.publish(self.redis_connector.servers_channel, format_message('volume', request.method, body=body, target=target)) - return Response(json.dumps(gather_response(response_number_target=1)), mimetype='application/json', status=200) - except Exception as e: - self.logger.error(e) - return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500) - - @bp.route('/start', methods=['POST']) - def http_all_start(): - try: - self.redis_connector.message_connection.publish(self.redis_connector.servers_channel, format_message('start', request.method, target='all')) - return Response(json.dumps(gather_response()), mimetype='application/json', status=200) + message_uuid = publish_message('volume', request.method, body=body, target=target) + return Response(json.dumps(gather_response('volume', message_uuid=message_uuid, response_number_target=0 if target == 'all' else 1)), mimetype='application/json', status=200) except Exception as e: self.logger.error(e) return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500) + @bp.route('/start', methods=['POST'], defaults={'target': 'all'}) @bp.route('/start/', methods=['POST']) - def http_target_start(target): - try: - self.redis_connector.message_connection.publish(self.redis_connector.servers_channel, format_message('start', request.method, target=target)) - return Response(json.dumps(gather_response(response_number_target=1)), mimetype='application/json', status=200) - except Exception as e: - self.logger.error(e) - return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500) - - @bp.route('/stop', methods=['POST']) - def http_all_stop(): + def http_start(target): try: - self.redis_connector.message_connection.publish(self.redis_connector.servers_channel, format_message('stop', request.method, target='all')) - return Response(json.dumps(gather_response()), mimetype='application/json', status=200) + 
message_uuid = publish_message('start', request.method, target=target) + return Response(json.dumps(gather_response('start', message_uuid=message_uuid, response_number_target=0 if target == 'all' else 1)), mimetype='application/json', status=200) except Exception as e: self.logger.error(e) return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500) - + + @bp.route('/stop', methods=['POST'], defaults={'target': 'all'}) @bp.route('/stop/', methods=['POST']) - def http_target_stop(target): - try: - self.redis_connector.message_connection.publish(self.redis_connector.servers_channel, format_message('stop', request.method, target=target)) - return Response(json.dumps(gather_response(response_number_target=1)), mimetype='application/json', status=200) - except Exception as e: - self.logger.error(e) - return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500) - - @bp.route('/restart', methods=['POST']) - def http_all_restart(): + def http_stop(target): try: - self.redis_connector.message_connection.publish(self.redis_connector.servers_channel, format_message('restart', request.method, target='all')) - return Response(json.dumps(gather_response()), mimetype='application/json', status=200) + message_uuid = publish_message('stop', request.method, target=target) + return Response(json.dumps(gather_response('stop', message_uuid=message_uuid, response_number_target=0 if target == 'all' else 1)), mimetype='application/json', status=200) except Exception as e: self.logger.error(e) return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500) + @bp.route('/restart', methods=['POST'], defaults={'target': 'all'}) @bp.route('/restart/', methods=['POST']) - def http_target_restart(target): - try: - self.redis_connector.message_connection.publish(self.redis_connector.servers_channel, format_message('restart', request.method, target=target)) - return Response(json.dumps(gather_response(response_number_target=1)), 
mimetype='application/json', status=200) - except Exception as e: - self.logger.error(e) - return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500) - - @bp.route('/reset', methods=['POST']) - def http_all_reset(): + def http_restart(target): try: - self.redis_connector.message_connection.publish(self.redis_connector.servers_channel, format_message('reset', request.method, target='all')) - return Response(json.dumps(gather_response()), mimetype='application/json', status=200) + message_uuid = publish_message('restart', request.method, target=target) + return Response(json.dumps(gather_response('restart', message_uuid=message_uuid, response_number_target=0 if target == 'all' else 1)), mimetype='application/json', status=200) except Exception as e: self.logger.error(e) return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500) + @bp.route('/reset', methods=['POST'], defaults={'target': 'all'}) @bp.route('/reset/', methods=['POST']) - def http_target_reset(target): + def http_reset(target): try: - self.redis_connector.message_connection.publish(self.redis_connector.servers_channel, format_message('reset', request.method, target=target)) - return Response(json.dumps(gather_response(response_number_target=1)), mimetype='application/json', status=200) + message_uuid = publish_message('reset', request.method, target=target) + return Response(json.dumps(gather_response('reset', message_uuid=message_uuid, response_number_target=0 if target == 'all' else 1)), mimetype='application/json', status=200) except Exception as e: self.logger.error(e) return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500) diff --git a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py index 58ae238b..4f09db98 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py @@ -52,7 +52,7 
@@ def start_listening(self): data = json.loads(message.get('data')) self.logger.info("Message Recieved {}".format(message['data'])) if data['target'] == 'all' or data['target'] == self.host: - thread = threading.Thread(target=self._delegate_jobs, args=(data.get('job'), data.get('request_method'), data.get('body'))) + thread = threading.Thread(target=self._delegate_jobs, args=(data.get('job'), data.get('request_method'), data.get('body'), data.get('message_uuid'))) thread.daemon = True thread.start() time.sleep(self.interval) @@ -60,45 +60,47 @@ def start_listening(self): thread.daemon = True thread.start() - def format_message(self, job, request_method, response): - return json.dumps({'job': job, 'request_method': request_method, 'response': response, 'host': self.host}) + def format_message(self, job, request_method, response, message_uuid): + return json.dumps({'job': job, 'request_method': request_method, 'response': response, 'host': self.host, 'message_uuid': message_uuid}) - def _delegate_jobs(self, job, request_method, body): + def _delegate_jobs(self, job, request_method, body, message_uuid): if not job: return else: + self.logger.info("Deleted {} {} {} {}".format(job, request_method, body, message_uuid)) if job == 'status': response = self.get_status() - self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('status', request_method, response=response)) + message = self.format_message('status', request_method, response=response, message_uuid=message_uuid) + self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, message) elif job == 'conf': if request_method == 'POST': self.set_conf(body) elif request_method == 'PUT': self.edit_conf(body) - self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('conf', request_method, response=self.get_conf())) + 
self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('conf', request_method, response=self.get_conf(), message_uuid=message_uuid)) elif job == 'bundle': self.set_bundle(body.get("url", '')) - self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('bundle', request_method, response=self.get_conf())) + self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('bundle', request_method, response=self.get_conf(), message_uuid=message_uuid)) elif job == 'setup': self.clean_bundle_conf() self.setup_http(body) - self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('setup', request_method, response=self.get_conf())) + self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('setup', request_method, response=self.get_conf(), message_uuid=message_uuid)) elif job == 'volume': if request_method == 'POST': self.set_volume(body.get("perDayVolume", 0.0)) - self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('volume', request_method, response=self.get_volume())) + self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('volume', request_method, response=self.get_volume(), message_uuid=message_uuid)) elif job == 'start': - self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('start', request_method, response=self.start())) + self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('start', request_method, response=self.start(), message_uuid=message_uuid)) elif job == 'stop': message = {'message': 'Eventgen is stopping. 
Might take some time to terminate all processes.'} - self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('stop', request_method, response=message)) + self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('stop', request_method, response=message, message_uuid=message_uuid)) self.stop(force_stop=True) elif job == 'restart': message = {'message': 'Eventgen is restarting. Might take some time to restart.'} - self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('restart', request_method, response=message)) + self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('restart', request_method, response=message, message_uuid=message_uuid)) self.restart() elif job == 'reset': message = {'message': 'Eventgen is resetting. Might take some time to reset.'} - self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('reset', request_method, response=message)) + self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('reset', request_method, response=message, message_uuid=message_uuid)) self.reset() From 8dfcd1e10aa6da15555ed090e9fae20f92875e12 Mon Sep 17 00:00:00 2001 From: Tony Lee Date: Thu, 10 Oct 2019 13:05:05 -0700 Subject: [PATCH 16/53] variable assignment fix (#306) --- .../eventgen_api_server/eventgen_controller_api.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/splunk_eventgen/eventgen_api_server/eventgen_controller_api.py b/splunk_eventgen/eventgen_api_server/eventgen_controller_api.py index e16be6a6..160973ed 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_controller_api.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_controller_api.py @@ -56,11 +56,11 @@ def gather_response(target_job, message_uuid, 
response_number_target=0): if message and type(message.get('data')) == str: server_response = json.loads(message.get('data')) self.logger.info(server_response) - message_uuid = server_response.get('message_uuid') - if message_uuid: - if message_uuid not in self.server_responses: - self.server_responses[message_uuid] = {} - self.server_responses[message_uuid][server_response['host']] = server_response['response'] + response_message_uuid = server_response.get('message_uuid') + if response_message_uuid: + if response_message_uuid not in self.server_responses: + self.server_responses[response_message_uuid] = {} + self.server_responses[response_message_uuid][server_response['host']] = server_response['response'] return self.server_responses.get(message_uuid, {}) @bp.route('/index', methods=['GET']) From 25a7f1c0d9ce9d36426a014e688f419299142304 Mon Sep 17 00:00:00 2001 From: David Wang Date: Mon, 14 Oct 2019 13:31:16 -0700 Subject: [PATCH 17/53] add healthcheck endpoint and ping it every half an hour (#308) --- .../eventgen_controller.py | 13 ++++++++++ .../eventgen_controller_api.py | 12 ++++++++- .../eventgen_server_api.py | 25 ++++++++++++++++++- 3 files changed, 48 insertions(+), 2 deletions(-) diff --git a/splunk_eventgen/eventgen_api_server/eventgen_controller.py b/splunk_eventgen/eventgen_api_server/eventgen_controller.py index 57e769d7..08bc78da 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_controller.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_controller.py @@ -2,6 +2,9 @@ import socket import os import logging +import requests +import time +import threading from eventgen_controller_api import EventgenControllerAPI from redis_connector import RedisConnector @@ -21,6 +24,7 @@ def __init__(self, *args, **kwargs): self.redis_connector.register_myself(hostname=self.host, role=self.role) self._setup_loggers() + self.connections_healthcheck() self.logger = logging.getLogger('eventgen_server') self.logger.info('Initialized Eventgen Controller: 
hostname [{}]'.format(self.host)) @@ -40,6 +44,15 @@ def index(): return app + def connections_healthcheck(self): + def start_checking(): + while True: + time.sleep(60 * 30) + requests.get("http://{}:{}/healthcheck".format("0.0.0.0", int(self.env_vars.get('WEB_SERVER_PORT')))) + thread = threading.Thread(target=start_checking) + thread.daemon = True + thread.start() + def _setup_loggers(self): log_path = os.path.join(FILE_PATH, 'logs') eventgen_controller_logger_path = os.path.join(LOG_PATH, 'eventgen-controller.log') diff --git a/splunk_eventgen/eventgen_api_server/eventgen_controller_api.py b/splunk_eventgen/eventgen_api_server/eventgen_controller_api.py index 160973ed..5538412e 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_controller_api.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_controller_api.py @@ -163,7 +163,17 @@ def http_reset(target): except Exception as e: self.logger.error(e) return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500) - + + @bp.route('/healthcheck', methods=['GET'], defaults={'target': 'all'}) + @bp.route('/healthcheck/', methods=['GET']) + def http_healthcheck(target): + try: + message_uuid = publish_message('healthcheck', request.method, target=target) + return Response(json.dumps(gather_response('healthcheck', message_uuid=message_uuid, response_number_target=0 if target == 'all' else 1)), mimetype='application/json', status=200) + except Exception as e: + self.logger.error(e) + return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500) + return bp def __make_error_response(self, status, message): diff --git a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py index 4f09db98..19bc95be 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py @@ -59,7 +59,7 @@ def start_listening(self): thread = 
threading.Thread(target=start_listening, args=(self,)) thread.daemon = True thread.start() - + def format_message(self, job, request_method, response, message_uuid): return json.dumps({'job': job, 'request_method': request_method, 'response': response, 'host': self.host, 'message_uuid': message_uuid}) @@ -102,6 +102,10 @@ def _delegate_jobs(self, job, request_method, body, message_uuid): message = {'message': 'Eventgen is resetting. Might take some time to reset.'} self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, self.format_message('reset', request_method, response=message, message_uuid=message_uuid)) self.reset() + elif job == 'healthcheck': + response = self.healthcheck() + message = self.format_message('healthcheck', request_method, response=response, message_uuid=message_uuid) + self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, message) def _create_blueprint(self): @@ -213,6 +217,14 @@ def http_post_setup(): self.logger.error(e) return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500) + @bp.route('/healthcheck', methods=['GET']) + def redis_connection_health(): + try: + return Response(json.dumps(self.healthcheck()), mimetype='application/json', status=200) + except Exception as e: + self.logger.error(e) + return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500) + return bp def get_index(self): @@ -422,6 +434,17 @@ def reset(self): response['message'] = "Eventgen has been reset." 
return response + def healthcheck(self): + response = {} + try: + self.redis_connector.pubsub.check_health() + response['message'] = "Connections are healthy" + except Exception as e: + self.logger.error("Connection to Redis failed: {}, re-registering".format(str(e))) + self.redis_connector.register_myself(hostname=self.host, role="server") + response['message'] = "Connections unhealthy - re-established connections" + return response + def set_bundle(self, url): if not url: return From 418eeeb68e3aa0a25d6b8f99ba46ffe915b689d5 Mon Sep 17 00:00:00 2001 From: Li Wu Date: Tue, 15 Oct 2019 04:50:24 +0800 Subject: [PATCH 18/53] Fix security vulnerability issue reported by GitHub (#309) --- docs/Gemfile.lock | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/docs/Gemfile.lock b/docs/Gemfile.lock index 5d1b4e46..d98655a0 100644 --- a/docs/Gemfile.lock +++ b/docs/Gemfile.lock @@ -6,8 +6,8 @@ GEM minitest (~> 5.1) thread_safe (~> 0.3, >= 0.3.4) tzinfo (~> 1.1) - addressable (2.6.0) - public_suffix (>= 2.0.2, < 4.0) + addressable (2.7.0) + public_suffix (>= 2.0.2, < 5.0) coffee-script (2.4.1) coffee-script-source execjs @@ -25,18 +25,18 @@ GEM ffi (>= 1.3.0) eventmachine (1.2.7) execjs (2.7.0) - faraday (0.15.4) + faraday (0.17.0) multipart-post (>= 1.2, < 3) ffi (1.11.1) forwardable-extended (2.6.0) gemoji (3.0.1) - github-pages (198) + github-pages (201) activesupport (= 4.2.11.1) github-pages-health-check (= 1.16.1) jekyll (= 3.8.5) jekyll-avatar (= 0.6.0) jekyll-coffeescript (= 1.1.1) - jekyll-commonmark-ghpages (= 0.1.5) + jekyll-commonmark-ghpages (= 0.1.6) jekyll-default-layout (= 0.1.4) jekyll-feed (= 0.11.0) jekyll-gist (= 1.5.0) @@ -47,7 +47,7 @@ GEM jekyll-readme-index (= 0.2.0) jekyll-redirect-from (= 0.14.0) jekyll-relative-links (= 0.6.0) - jekyll-remote-theme (= 0.3.1) + jekyll-remote-theme (= 0.4.0) jekyll-sass-converter (= 1.5.2) jekyll-seo-tag (= 2.5.0) jekyll-sitemap (= 1.2.0) @@ -72,8 +72,8 @@ GEM listen (= 
3.1.5) mercenary (~> 0.3) minima (= 2.5.0) - nokogiri (>= 1.8.5, < 2.0) - rouge (= 2.2.1) + nokogiri (>= 1.10.4, < 2.0) + rouge (= 3.11.0) terminal-table (~> 1.4) github-pages-health-check (1.16.1) addressable (~> 2.3) @@ -108,10 +108,10 @@ GEM jekyll-commonmark (1.3.1) commonmarker (~> 0.14) jekyll (>= 3.7, < 5.0) - jekyll-commonmark-ghpages (0.1.5) + jekyll-commonmark-ghpages (0.1.6) commonmarker (~> 0.17.6) - jekyll-commonmark (~> 1) - rouge (~> 2) + jekyll-commonmark (~> 1.2) + rouge (>= 2.0, < 4.0) jekyll-default-layout (0.1.4) jekyll (~> 3.0) jekyll-feed (0.11.0) @@ -133,7 +133,8 @@ GEM jekyll (~> 3.3) jekyll-relative-links (0.6.0) jekyll (~> 3.3) - jekyll-remote-theme (0.3.1) + jekyll-remote-theme (0.4.0) + addressable (~> 2.0) jekyll (~> 3.5) rubyzip (>= 1.2.1, < 3.0) jekyll-sass-converter (1.5.2) @@ -203,7 +204,7 @@ GEM jekyll (~> 3.5) jekyll-feed (~> 0.9) jekyll-seo-tag (~> 2.1) - minitest (5.11.3) + minitest (5.12.2) multipart-post (2.1.1) nokogiri (1.10.4) mini_portile2 (~> 2.4.0) @@ -215,11 +216,11 @@ GEM rb-fsevent (0.10.3) rb-inotify (0.10.0) ffi (~> 1.0) - rouge (2.2.1) + rouge (3.11.0) ruby-enum (0.7.2) i18n ruby_dep (1.5.0) - rubyzip (1.2.3) + rubyzip (1.3.0) safe_yaml (1.0.5) sass (3.7.4) sass-listen (~> 4.0.0) From b6b4de50a263e1de5263c530d6f330b6f23a2e5d Mon Sep 17 00:00:00 2001 From: David Wang Date: Mon, 14 Oct 2019 14:13:04 -0700 Subject: [PATCH 19/53] make controller also hit redis on healthcheck (#311) --- .../eventgen_api_server/eventgen_controller.py | 5 ++++- .../eventgen_api_server/eventgen_controller_api.py | 10 ++++++++++ .../eventgen_api_server/eventgen_server_api.py | 2 +- 3 files changed, 15 insertions(+), 2 deletions(-) diff --git a/splunk_eventgen/eventgen_api_server/eventgen_controller.py b/splunk_eventgen/eventgen_api_server/eventgen_controller.py index 08bc78da..1b2ae3cd 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_controller.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_controller.py @@ -48,7 +48,10 @@ 
def connections_healthcheck(self): def start_checking(): while True: time.sleep(60 * 30) - requests.get("http://{}:{}/healthcheck".format("0.0.0.0", int(self.env_vars.get('WEB_SERVER_PORT')))) + try: + requests.get("http://{}:{}/healthcheck".format("0.0.0.0", int(self.env_vars.get('WEB_SERVER_PORT')))) + except Exception as e: + self.logger.error(str(e)) thread = threading.Thread(target=start_checking) thread.daemon = True thread.start() diff --git a/splunk_eventgen/eventgen_api_server/eventgen_controller_api.py b/splunk_eventgen/eventgen_api_server/eventgen_controller_api.py index 5538412e..4b8d75a9 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_controller_api.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_controller_api.py @@ -167,6 +167,16 @@ def http_reset(target): @bp.route('/healthcheck', methods=['GET'], defaults={'target': 'all'}) @bp.route('/healthcheck/', methods=['GET']) def http_healthcheck(target): + try: + self.redis_connector.pubsub.check_health() + except Exception as e: + self.logger.info("Connection to Redis failed: {}, re-registering".format(str(e))) + try: + self.redis_connector.register_myself(hostname=self.host, role='controller') + except Exception as connection_error: + self.logger.error(connection_error) + return Response(INTERNAL_ERROR_RESPONSE, mimetype='application/json', status=500) + try: message_uuid = publish_message('healthcheck', request.method, target=target) return Response(json.dumps(gather_response('healthcheck', message_uuid=message_uuid, response_number_target=0 if target == 'all' else 1)), mimetype='application/json', status=200) diff --git a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py index 19bc95be..0c1a9d4a 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py @@ -218,7 +218,7 @@ def http_post_setup(): return Response(INTERNAL_ERROR_RESPONSE, 
mimetype='application/json', status=500) @bp.route('/healthcheck', methods=['GET']) - def redis_connection_health(): + def http_get_healthcheck(): try: return Response(json.dumps(self.healthcheck()), mimetype='application/json', status=200) except Exception as e: From 5352ac2103acb98a0cdea96d6e068858ba0d51ae Mon Sep 17 00:00:00 2001 From: Tony Lee Date: Wed, 16 Oct 2019 18:08:13 -0700 Subject: [PATCH 20/53] Fixed typo (#313) --- splunk_eventgen/eventgen_api_server/eventgen_server_api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py index 0c1a9d4a..37c68f3e 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py @@ -66,7 +66,7 @@ def format_message(self, job, request_method, response, message_uuid): def _delegate_jobs(self, job, request_method, body, message_uuid): if not job: return else: - self.logger.info("Deleted {} {} {} {}".format(job, request_method, body, message_uuid)) + self.logger.info("Delegated {} {} {} {}".format(job, request_method, body, message_uuid)) if job == 'status': response = self.get_status() message = self.format_message('status', request_method, response=response, message_uuid=message_uuid) From f8e44065f5a6d426246a5b1d38d0f23c490222bc Mon Sep 17 00:00:00 2001 From: David Wang Date: Wed, 16 Oct 2019 22:40:52 -0700 Subject: [PATCH 21/53] update redis version to use new functions (#318) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 7cb24a57..e232e488 100644 --- a/requirements.txt +++ b/requirements.txt @@ -21,6 +21,6 @@ flake8>=3.7.7 yapf>=0.26.0 isort>=4.3.15 Flask>=1.0.3 -redis==3.2.1 +redis==3.3.10 structlog==19.1.0 uuid \ No newline at end of file From f22869a2727e8923565e24b09d984d684e46f5fa Mon Sep 17 00:00:00 2001 From: Li Wu Date: Thu, 
17 Oct 2019 14:28:42 +0800 Subject: [PATCH 22/53] Update tutorial related docs (#315) --- docs/BASICS.md | 268 +++++++++--------- docs/CONFIGURE.md | 16 +- docs/SETUP.md | 16 +- docs/TUTORIAL.md | 172 ++++++++--- docs/index.md | 4 +- .../README/eventgen.conf.tutorial0 | 18 ++ .../README/eventgen.conf.tutorial1 | 10 - .../README/eventgen.conf.tutorial2 | 3 +- .../README/eventgen.conf.tutorial3 | 43 +-- .../README/eventgen.conf.tutorial4 | 9 - splunk_eventgen/samples/sample.tutorial0 | 12 + tests/sample_bundle.zip | Bin 0 -> 41702 bytes 12 files changed, 321 insertions(+), 250 deletions(-) create mode 100644 splunk_eventgen/README/eventgen.conf.tutorial0 create mode 100644 splunk_eventgen/samples/sample.tutorial0 create mode 100644 tests/sample_bundle.zip diff --git a/docs/BASICS.md b/docs/BASICS.md index 107c9575..b0834083 100644 --- a/docs/BASICS.md +++ b/docs/BASICS.md @@ -1,7 +1,7 @@ # Welcome Welcome to the basics of Eventgen. -This should hopefully get you through setting up a working eventgen instance. For a complete reference of all of the available configuration options, please check out the [eventgen.conf.spec](REFERENCE.md#eventgenconfspec) in the docs directory. With that, feel free to dig right in, and please post to the Issues page if you have any questions. +This should hopefully get you through setting up a working eventgen instance. For a complete reference of all of the available configuration options, please check out the [eventgen.conf.spec](REFERENCE.md#eventgenconfspec). With that, feel free to dig right in, and please post to the Issues page if you have any questions. ## Replay Example @@ -15,7 +15,7 @@ To build a seed for your new Eventgen, I recommend taking an export from an exis * Third, make sure you find all the different time formats inside the log file and set up tokens to replace for them, so limiting your initial search to a few sourcetypes is probably advisable. ### Running the example -You can easily run these examples by hand. 
In fact, for testing purposes, I almost always change outputMode = stdout to visually examine the data. Run the command below from directory `$EVENTGEN_HOME/splunk_eventgen`. +You can easily run these examples by hand. In fact, for testing purposes, I almost always change `outputMode = stdout` to visually examine the data. Run the command below from directory `$EVENTGEN_HOME/splunk_eventgen`. python -m splunk_eventgen generate README/eventgen.conf.tutorial1 @@ -32,173 +32,169 @@ Next, lets build a basic noise generator from a log file. This will use sample ### Grabbing and rating events -We have a file in the samples directory called sample.tutorial2 that we'll use as the seed for our event generator. It contains some random noise pulled from Router and Switch logs. It will provide a good basis of showing how we can very quickly take a customer's log file and randomly sample it and make it show up in real time. We won't get too sophisticated with substitutions in this example, just a timestamp, and some more varied interfaces to make it look interesting. +We have a file in the samples directory called `sample.tutorial2` that we'll use as the seed for our event generator. It contains some random noise pulled from Router and Switch logs. It will provide a good basis of showing how we can very quickly take a customer's log file and randomly sample it and make it show up in real time. We won't get too sophisticated with substitutions in this example, just a timestamp, and some more varied interfaces to make it look interesting. When we're defining a new config file, we need to decide which defaults we're going to override. By default for example, we'll rate events by time of day and day of week. Do we want to override that? There's a variety of defaults we should consider. They're listed in the [eventgen.conf.spec](https://github.com/splunk/eventgen/blob/master/README/eventgen.conf.spec) in the README directory for reference. 
Let's list out the file here and then break down the config directives we've not seen before: - [cisco.sample] - interval = 15 - earliest = -15s - latest = now - count = 20 - hourOfDayRate = { "0": 0.8, "1": 1.0, "2": 0.9, "3": 0.7, "4": 0.5, "5": 0.4, "6": 0.4, "7": 0.4, "8": 0.4, "9": 0.4, "10": 0.4, "11": 0.4, "12": 0.4, "13": 0.4, "14": 0.4, "15": 0.4, "16": 0.4, "17": 0.4, "18": 0.4, "19": 0.4, "20": 0.4, "21": 0.4, "22": 0.5, "23": 0.6 } - dayOfWeekRate = { "0": 0.7, "1": 0.7, "2": 0.7, "3": 0.5, "4": 0.5, "5": 1.0, "6": 1.0 } - randomizeCount = 0.2 - randomizeEvents = true - - outputMode = file - fileName = /tmp/ciscosample.log - - ## Replace timestamp Feb 4 07:52:53 - token.0.token = \w{3}\s+\d{1,2}\s+\d{2}:\d{2}:\d{2} - token.0.replacementType = timestamp - token.0.replacement = %b %d %H:%M:%S +``` +[sample.tutorial2] +interval = 15 +earliest = -15s +latest = now +count = 20 +hourOfDayRate = { "0": 0.8, "1": 1.0, "2": 0.9, "3": 0.7, "4": 0.5, "5": 0.4, "6": 0.4, "7": 0.4, "8": 0.4, "9": 0.4, "10": 0.4, "11": 0.4, "12": 0.4, "13": 0.4, "14": 0.4, "15": 0.4, "16": 0.4, "17": 0.4, "18": 0.4, "19": 0.4, "20": 0.4, "21": 0.4, "22": 0.5, "23": 0.6 } +dayOfWeekRate = { "0": 0.7, "1": 0.7, "2": 0.7, "3": 0.5, "4": 0.5, "5": 1.0, "6": 1.0 } +randomizeCount = 0.2 +randomizeEvents = true + +outputMode = file +fileName = /tmp/ciscosample.log + +## Replace timestamp Feb 4 07:52:53 +token.0.token = \w{3}\s+\d{1,2}\s+\d{2}:\d{2}:\d{2} +token.0.replacementType = timestamp +token.0.replacement = %b %d %H:%M:%S +``` First: - - interval = 15 - earliest = -15s - latest = now +``` +interval = 15 +earliest = -15s +latest = now +``` Let's us decide how often we want to generate events and how we want to generate time stamps for these events. In this case, every 15 seconds should be sufficient, but depending on your use case you may want to generate only once an hour, once every minute, or every second. 
We'll generally want to set earliest to a value that's equal to a splunk relative time specifier opposite of interval. So, if we set it to an hour, or 3600, we'll want earliest to be -3600s or -1h. For this example, lets generate every 15 seconds. - - count = 20 - hourOfDayRate = { "0": 0.8, "1": 1.0, "2": 0.9, "3": 0.7, "4": 0.5, "5": 0.4, "6": 0.4, "7": 0.4, "8": 0.4, "9": 0.4, "10": 0.4, "11": 0.4, "12": 0.4, "13": 0.4, "14": 0.4, "15": 0.4, "16": 0.4, "17": 0.4, "18": 0.4, "19": 0.4, "20": 0.4, "21": 0.4, "22": 0.5, "23": 0.6 } - dayOfWeekRate = { "0": 0.7, "1": 0.7, "2": 0.7, "3": 0.5, "4": 0.5, "5": 1.0, "6": 1.0 } - randomizeCount = 0.2 - randomizeEvents = true +``` +count = 20 +hourOfDayRate = { "0": 0.8, "1": 1.0, "2": 0.9, "3": 0.7, "4": 0.5, "5": 0.4, "6": 0.4, "7": 0.4, "8": 0.4, "9": 0.4, "10": 0.4, "11": 0.4, "12": 0.4, "13": 0.4, "14": 0.4, "15": 0.4, "16": 0.4, "17": 0.4, "18": 0.4, "19": 0.4, "20": 0.4, "21": 0.4, "22": 0.5, "23": 0.6 } +dayOfWeekRate = { "0": 0.7, "1": 0.7, "2": 0.7, "3": 0.5, "4": 0.5, "5": 1.0, "6": 1.0 } +randomizeCount = 0.2 +randomizeEvents = true +``` Eventgen by default will rate events by the time of day and the day of the week and introduce some randomness every interval. Also by default, we'll only grab the first X events from the log file every time. For this example, we're looking at router and switch events, which actually is the opposite of the normal business flow. We expect to see more events overnight for a few hours during maintenance windows and calm down during the day, so we'll need to override the default rating which looks like a standard business cycle. `hourOfDayRate` is a JSON formatted hash, with a string identifier for the current hour and a float representing the multiplier we want to use for that hour. In general, I've always configured the rate to be between 0 and 1, but nothing limits you from putting it at any valid floating point value. 
`dayOfWeekRate` is similar, but the number is the day of the week, starting with Sunday. In this example, Saturday and Sunday early mornings should have the greatest number of events, with fewer events evenly distributed during the week. `randomizeCount` says to introduce 20% randomness, which means plus or minus 10% of the rated total, to every rated count just to make sure we don't have a flat rate of events. `randomizeEvents` we discussed previously, it makes sure we don't grab the same lines from the file every time. - outputMode = file - fileName = /tmp/ciscosample.log +``` +outputMode = file +fileName = /tmp/ciscosample.log +``` As you saw with the last example, we can output straight to Splunk, but in this case we're going to do a simple output to file. The file outputMode rotates based on size (by default 10 megabytes) and keeps the most recent 5 files around. - - ## Replace timestamp Feb 4 07:52:53 - token.0.token = \w{3}\s+\d{1,2}\s+\d{2}:\d{2}:\d{2} - token.0.replacementType = timestamp - token.0.replacement = %b %d %H:%M:%S +``` +## Replace timestamp Feb 4 07:52:53 +token.0.token = \w{3}\s+\d{1,2}\s+\d{2}:\d{2}:\d{2} +token.0.replacementType = timestamp +token.0.replacement = %b %d %H:%M:%S +``` As we've seen before, here's a simple token substitution for the timestamp. This will make the events appear to be coming in sometime during the last 15 seconds, based on earliest and latest configs above. -Let's look in detail at this configuration format. token is the configuration statement, 0 is the token number (we'll want a different number for every token we define, although they can be non-contiguous). The third part defines the three subitems of token configuration. The first, token, defines a regular expression we're going to look for in the events as they stream through Eventgen. The second, replacementType, defines what type of replacement we're going to need. 
This is a timestamp, but we also offer a variety of other token replacement types such as random for randomly generated values, file for grabbing lines out of files, static for replacing with static strings, etc. We'll cover those in detail later. The third subitem, replacement, is specific for the replacementType, and in this case defines a strptime format we're going to use to output the time using strftime. For a reference on how to configure strptime, check python's documentation on strptime format strings. +Let's look in detail at this configuration format. `token` is the configuration statement, `0` is the token number (we'll want a different number for every token we define, although they can be non-contiguous). The third part defines the three subitems of token configuration. The first, `token`, defines a regular expression we're going to look for in the events as they stream through Eventgen. The second, `replacementType`, defines what type of replacement we're going to need. This is a timestamp, but we also offer a variety of other token replacement types such as random for randomly generated values, file for grabbing lines out of files, static for replacing with static strings, etc. We'll cover those in detail later. The third subitem, `replacement`, is specific for the `replacementType`, and in this case defines a strptime format we're going to use to output the time using strftime. For a reference on how to configure strptime, check python's documentation on strptime format strings. -This should now replay random events from the file we have configured. Go ahead and cd to `$EVENTGEN_HOME/splunk_eventgen` and run `python -m splunk_eventgen generate README/eventgen.conf.tutorial1`. In another shell, tail -f /tmp/ciscosample.log and you should see events replaying from the cisco.sample file! 
You can reuse this same example to easily replay a customer log file, of course accounting for the different regular expressions and strptime formats you'll need for their timestamps. Remember to customize interval, earliest, and count for the number of events you want the generator to build. +This should now replay random events from the file we have configured. Go ahead and cd to `$EVENTGEN_HOME/splunk_eventgen` and run `python -m splunk_eventgen generate README/eventgen.conf.tutorial2`. In another shell, `tail -f /tmp/ciscosample.log` and you should see events replaying from the `sample.tutorial2` file! You can reuse this same example to easily replay a customer log file, of course accounting for the different regular expressions and strptime formats you'll need for their timestamps. Remember to customize `interval`, `earliest`, and `count` for the number of events you want the generator to build. ## Second example, building events from scratch Replaying random events from a file is an easy way to build an eventgen. Sometimes, like in Eventgen we're building for VMware, the events you're modeling are so complicated it's simplest way to do it without investing a lot of time modeling all the tokens you want to subtitute etc. Also, sometimes so many tokens need to move together, it's easiest just to replay the file with new timestamps. However, if we're building a new demo from scratch, a lot of times we want to generate events from a basic template with values we're providing from files. 
Let's look at an example: - - [sample.tutorial3] - interval = 3 - earliest = -3s - latest = now - count = 10 - hourOfDayRate = { "0": 0.30, "1": 0.10, "2": 0.05, "3": 0.10, "4": 0.15, "5": 0.25, "6": 0.35, "7": 0.50, "8": 0.60, "9": 0.65, "10": 0.70, "11": 0.75, "12": 0.77, "13": 0.80, "14": 0.82, "15": 0.85, "16": 0.87, "17": 0.90, "18": 0.95, "19": 1.0, "20": 0.85, "21": 0.70, "22": 0.60, "23": 0.45 } - dayOfWeekRate = { "0": 0.55, "1": 0.97, "2": 0.95, "3": 0.90, "4": 0.97, "5": 1.0, "6": 0.99 } - randomizeCount = 0.2 - backfill = -1h - backfillSearch = sourcetype=be_log - - outputMode = splunkstream - index=main - host=host1.foobar.com - source=/var/log/be/event.log - sourcetype=be_log - - # Host/User/pass only necessary if running outside of splunk! - splunkHost = localhost - splunkUser = admin - splunkPass = changeme - - token.0.token = \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3} - token.0.replacementType = timestamp - token.0.replacement = %Y-%m-%d %H:%M:%S - - token.1.token = transType=(\w+) - token.1.replacementType = file - token.1.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/orderType.sample - - token.2.token = transID=(\d+) - token.2.replacementType = integerid - token.2.replacement = 100000 - - token.3.token = transGUID=([0-9a-fA-F]+) - token.3.replacementType = random - token.3.replacement = hex(24) - - token.4.token = userName=(\w+) - token.4.replacementType = file - token.4.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/userName.sample - - token.5.token = city="(\w+)" - token.5.replacementType = mvfile - token.5.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/markets.sample:2 - - token.6.token = state=(\w+) - token.6.replacementType = mvfile - token.6.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/markets.sample:3 - - token.7.token = zip=(\d+) - token.7.replacementType = mvfile - token.7.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/markets.sample:1 - -### Output modes - index=main - host=host4.foobar.com 
- source=eventgen - sourcetype=business_event - -Note here that we've specified index, host, source and sourceType. In the past examples, this has been defined in the actual sample file on a per event basis by specifying sampletype = csv, but here we're reading from a plain text file so we need to specify this in the config file if we're setup as outputMode = splunkstream. +``` +# Note, these samples assume you're installed as an app or a symbolic link in +# $SPLUNK_HOME/etc/apps/eventgen. If not, please change the paths below. + +[sample.tutorial3] +interval = 1 +earliest = -1s +latest = now +count = 10000 +hourOfDayRate = { "0": 0.30, "1": 0.10, "2": 0.05, "3": 0.10, "4": 0.15, "5": 0.25, "6": 0.35, "7": 0.50, "8": 0.60, "9": 0.65, "10": 0.70, "11": 0.75, "12": 0.77, "13": 0.80, "14": 0.82, "15": 0.85, "16": 0.87, "17": 0.90, "18": 0.95, "19": 1.0, "20": 0.85, "21": 0.70, "22": 0.60, "23": 0.45 } +dayOfWeekRate = { "0": 0.55, "1": 0.97, "2": 0.95, "3": 0.90, "4": 0.97, "5": 1.0, "6": 0.99 } +randomizeCount = 0.2 +outputMode = stdout + +token.0.token = \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3} +token.0.replacementType = timestamp +token.0.replacement = %Y-%m-%d %H:%M:%S + +token.1.token = transType=(\w+) +token.1.replacementType = file +token.1.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/orderType.sample + +token.2.token = transID=(\d+) +token.2.replacementType = integerid +token.2.replacement = 10000 + +token.3.token = transGUID=([0-9a-fA-F]+) +token.3.replacementType = random +token.3.replacement = guid + +token.4.token = userName=(\w+) +token.4.replacementType = file +token.4.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/userName.sample + +token.5.token = city="(\w+)" +token.5.replacementType = mvfile +token.5.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/markets.sample:2 + +token.6.token = state=(\w+) +token.6.replacementType = mvfile +token.6.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/markets.sample:3 + 
+token.7.token = zip=(\d+) +token.7.replacementType = mvfile +token.7.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/markets.sample:1 + +token.8.token = value=(\d+) +token.8.replacementType = random +token.8.replacement = float[0.000:10.000] +``` ### Defining tokens If you look at the `sample.tutorial3` file, you'll see that we took just one sample event and placed it in the file. Eventgen will look at this one event, continue to replay it a number of times defined by our rating parameters, and then substitute in tokens we're going to define. First, let's get the one token we understand out of the way, the timestamp: - - token.0.token = \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3} - token.0.replacementType = timestamp - token.0.replacement = %Y-%m-%d %H:%M:%S - +``` +token.0.token = \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3} +token.0.replacementType = timestamp +token.0.replacement = %Y-%m-%d %H:%M:%S +``` Now, let's look at some new token substitutions we haven't seen: +``` +token.2.token = transID=(\d+) +token.2.replacementType = integerid +token.2.replacement = 100000 - token.2.token = transID=(\d+) - token.2.replacementType = integerid - token.2.replacement = 100000 - - token.3.token = transGUID=([0-9a-fA-F]+) - token.3.replacementType = random - token.3.replacement = hex(24) +token.3.token = transGUID=([0-9a-fA-F]+) +token.3.replacementType = random +token.3.replacement = hex(24) - token.4.token = userName=(\w+) - token.4.replacementType = file - token.4.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/userName.sample +token.4.token = userName=(\w+) +token.4.replacementType = file +token.4.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/userName.sample +``` -There are three types of substitutions here. Integerid is a constantly incrementing integer. The replacement value is the seed to start with, and state will be saved between runs such that it will always increment. 
Random supports integer, float, hex digits, ipv4, ipv6, mac, and string types. These will just randomly generate digits. In the case of integer, we also have a unix timestamp in this event we don't use, so we're telling it just to generate a random integer that looks like a timestamp. For the two hex tokens, we're saying just generate some hex digits. Note that where we have more complicated strings, we create a RegEx capture group with parenthesis to indicate the portion of the string we want Eventgen to replace. +There are three types of substitutions here. `integerid` is a constantly incrementing integer. The replacement value is the seed to start with, and state will be saved between runs such that it will always increment. Random supports integer, float, hex digits, ipv4, ipv6, mac, and string types. These will just randomly generate digits. In the case of integer, we also have a unix timestamp in this event we don't use, so we're telling it just to generate a random integer that looks like a timestamp. For the two hex tokens, we're saying just generate some hex digits. Note that where we have more complicated strings, we create a RegEx capture group with parenthesis to indicate the portion of the string we want Eventgen to replace. Next, let's look at the file substitution: - - token.1.token = transType=(\w+) - token.1.replacementType = file - token.1.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/orderType.sample - -If you look in the sample file, you'll see various text values which are Order types for our application. You'll also notice them repeated multiple times, which may seem strange. The file based substitution will grab one line from a file, and then replace the RegEx capture group with the text it grabbed from the file. This is very powerful, and we include many different types of common data with Eventgen, like internal and external IP addresses, usernames, etc, which may be useful for common applications. 
Back to why in orderType.sample we see repeated values, because the selection is random, in this case we want the data to appear less than random. We want a certain percentage of orders to be of type NewActivation, ChangeESN, etc, so we repeat the entries in the file multiple times to have some randomness, but according to the guidelines that a business would normally see! +``` +token.1.token = transType=(\w+) +token.1.replacementType = file +token.1.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/orderType.sample +``` +If you look in the sample file, you'll see various text values which are Order types for our application. You'll also notice them repeated multiple times, which may seem strange. The file based substitution will grab one line from a file, and then replace the RegEx capture group with the text it grabbed from the file. This is very powerful, and we include many different types of common data with Eventgen, like internal and external IP addresses, usernames, etc, which may be useful for common applications. Back to why in `orderType.sample` we see repeated values, because the selection is random, in this case we want the data to appear less than random. We want a certain percentage of orders to be of type NewActivation, ChangeESN, etc, so we repeat the entries in the file multiple times to have some randomness, but according to the guidelines that a business would normally see! 
We'll cover one more substitution type, mvfile: +``` +token.5.token = city="(\w+)" +token.5.replacementType = mvfile +token.5.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/markets.sample:2 - token.5.token = city="(\w+)" - token.5.replacementType = mvfile - token.5.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/markets.sample:2 - - token.6.token = state=(\w+) - token.6.replacementType = mvfile - token.6.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/markets.sample:3 - - token.7.token = zip=(\d+) - token.7.replacementType = mvfile - token.7.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/markets.sample:1 +token.6.token = state=(\w+) +token.6.replacementType = mvfile +token.6.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/markets.sample:3 -Mvfile is a multi-value file. Because sometimes we need to replace more than one token based on the same random choice, we implemented the mvfile replacement type. Mvfile will make a selection per event, and then re-use the same selection for all tokens in the event. This allows us to replace City, State and Zip code together as you can see from the example above. It can also be used to substitute the same choice into multiple tokens in the same event if that's required, as you can reuse the same file:column notation multiple times if you so choose. +token.7.token = zip=(\d+) +token.7.replacementType = mvfile +token.7.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/markets.sample:1 +``` +`mvfile` is a multi-value file. Because sometimes we need to replace more than one token based on the same random choice, we implemented the mvfile replacement type. Mvfile will make a selection per event, and then re-use the same selection for all tokens in the event. This allows us to replace City, State and Zip code together as you can see from the example above. 
It can also be used to substitute the same choice into multiple tokens in the same event if that's required, as you can reuse the same file:column notation multiple times if you so choose. Go take a look at the full file now. You'll see we've built a model of 8 tokens we're replacing for every event. We've modeled a set of business transactions without needing to write a single line of code. Go ahead and run the tutorial and take a look at the output in Splunk (note to run this example, you'll need to set $SPLUNK_HOME and Eventgen app will need to be installed as SA-Eventgen) @@ -242,7 +238,7 @@ Secondly, in the replacement clause, we have a JSON formatted list. This allows ## Command Line -This revision of Eventgen can be run by itself from a command line for testing. This means you can simply run `splunk_eventgen generate eventgen.conf` and start seeing output, which is great for testing. **Command Line and Embedded Defaults are defined in the `splunk_eventgen/default/eventgen.conf` file in the [global] stanza**. +This revision of Eventgen can be run by itself from a command line for testing. This means you can simply run `splunk_eventgen generate eventgen.conf` and start seeing output, which is great for testing. Command Line and Embedded Defaults are defined in the `splunk_eventgen/default/eventgen.conf` file in the [global] stanza. ## Splunk App diff --git a/docs/CONFIGURE.md b/docs/CONFIGURE.md index 78047bd5..449d1a8e 100644 --- a/docs/CONFIGURE.md +++ b/docs/CONFIGURE.md @@ -1,5 +1,5 @@ -## Configure ## +## Configure After you have installed Eventgen by the method of your choosing, you may be asking some of the following questions: * How much data should Eventgen send? @@ -20,12 +20,12 @@ In addition, common use cases work around bundling these relevant files. 
Because Eventgen configs can be tightly coupled with custom sample files, they can be bundled up into a package itself, in the format: ``` bundle/ - default/ - eventgen.conf - samples/ - users.sample - hosts.sample - firewall.logs + default/ + eventgen.conf + samples/ + users.sample + hosts.sample + firewall.logs ``` @@ -36,7 +36,7 @@ sample you wish to create, followed by key = value tuning options for that sampl [] * This stanza defines a given sample file contained within the samples directory. * This stanza can be specified as a PCRE. -configurationkey = configuration value + = [windbag] count=100 diff --git a/docs/SETUP.md b/docs/SETUP.md index ebb1f996..bc993c65 100644 --- a/docs/SETUP.md +++ b/docs/SETUP.md @@ -21,7 +21,7 @@ Below are the two major ways to use Eventgen - as a PyPI module and as a Splunk * Install / Use Eventgen as a [Python (PyPI) package](#pypi-installation) - Benefits: + Benefits: * Support for threading / multiprocessing * Support for a centralized service that can control and run multiple threading workers * Able to run a larger amount of data volume with less overhead @@ -29,13 +29,14 @@ Below are the two major ways to use Eventgen - as a PyPI module and as a Splunk * Exposes more of the plugin system * Includes/installs the Jinja2 templating engine - Draw Backs: + Drawbacks: * More complex installation - * You have to run the "build" command to produce a Splunk app + * You have to run the `build` command to produce a Splunk app * Harder to troubleshoot (especially in multiprocess mode) --- + ## PyPI Installation / First Run To use Eventgen as a PyPI module, you need to either download/clone the source code or install direct from github. @@ -44,7 +45,7 @@ To use Eventgen as a PyPI module, you need to either download/clone the source c ``` $ git clone https://www.github.com/splunk/eventgen ``` -Depending on your desired case, you may wish to use a specific branch.
Eventgen's branching model will always have the "master" branch as the most stable and released version of Eventgen, while the "develop" branch will contain the bleeding edge codeline. +Depending on your desired case, you may wish to use a specific branch. Eventgen's branching model will always have the `master` branch as the most stable and released version of Eventgen, while the `develop` branch will contain the bleeding edge codeline. To select your codeline, simply checkout your desired branch (develop is selected by default). ``` @@ -113,11 +114,12 @@ Please follow these instructions to run an Eventgen cluster on your Docker envir --- + ## Splunk App Installation / First Run To use Eventgen as a Splunk app, you need a SPL file. This SPL file can be obtained in one of two ways: -1. Through running the "build" process of the splunk_eventgen pypi module -2. Downloading the SPL direct from [splunkbase](https://splunkbase.splunk.com/app/1924/): +1. Through running the `build` process of the splunk_eventgen pypi module +2. Downloading the SPL direct from [splunkbase](https://splunkbase.splunk.com/app/1924/) ###### Generating the SPL file In order to generate the SPL file, install Eventgen through PyPI with the instructions above. @@ -164,4 +166,6 @@ Make sure the bundle app permission is global. You can config this in two ways: export=system ``` +You can use this [sample bundle](https://github.com/splunk/eventgen/tree/develop/tests/sample_bundle.zip) to have a quick start for data generation. + --- diff --git a/docs/TUTORIAL.md b/docs/TUTORIAL.md index 75499fac..c5ad1ec4 100644 --- a/docs/TUTORIAL.md +++ b/docs/TUTORIAL.md @@ -7,90 +7,178 @@ The primary source of configuration done in Eventgen is governed by the `eventge The INI format of `eventgen.conf` can have one or more stanzas. Each stanza name is a sample file it will be reading from. There are a number of options available in each stanza.
For instance, breaking down this tutorial file option-by-option, we can see how this file will be used to set up Eventgen: +### Simple Configuration +Sample conf from [sample bundle](https://github.com/splunk/eventgen/tree/develop/tests/sample_bundle.zip). ``` - [sample.tutorial1] - mode = replay - sampletype = csv - timeMultiple = 2 - backfill = -15m - backfillSearch = index=main sourcetype=splunkd +[film.json] +index = main +count = 1000 +mode = sample +end = 1 +autotimestamp = true +sourcetype = json +source = film.json - outputMode = splunkstream - splunkHost = localhost - splunkUser = admin - splunkPass = changeme +token.0.token = "FILM_ID":(\d+) +token.0.replacementType = integerid +token.0.replacement = 0 - token.0.token = \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3} - token.0.replacementType = timestamp - token.0.replacement = %Y-%m-%d %H:%M:%S,%f +token.1.token = "REGION_ID":(\d+) +token.1.replacementType = seqfile +token.1.replacement = $SPLUNK_HOME/etc/apps/sample_conf/samples/count10.txt ``` ``` - [sample.tutorial1] +[film.json] ``` -This is the stanza name and the name of the sample file in Eventgen or your bundle that you want to read from. You can also specify a regex here to match multiple files of similar extensions/naming conventions. +This is the sample file name under `samples` folder. + +``` +index = main +``` +Destination index of the generated data in Splunk. + +``` +count = 1000 +``` +Maximum number of events to generate per sample file. + +``` +mode = sample +``` +In sample mode, eventgen will generate count (+/- rating) events every configured interval. + +``` +end = 1 +``` +After Eventgen started, it will only generate one time with 1000 events based on the configuration. +The value is `-1` by default and the data generation will not end. ``` - mode = replay +autotimestamp = true ``` -Specify replay mode. 
This will leak out events at the same timing as they appear in the file (with intervals between events like they occurred in the source file). Default mode is sample, so this is required for replay mode. +Eventgen will detect timestamps from the sample, if any. ``` - sampletype = csv +sourcetype = json +source = film.json ``` -Specify that the input file is in CSV format, rather than a plain text file. With CSV input, we'll look for index, host, source, and sourcetype on a per event basis rather than setting them for the file as a whole. +Set the `sourcetype` and `source` in Splunk. ``` - timeMultiple = 2 +token.0.token = "FILM_ID":(\d+) +token.0.replacementType = integerid +token.0.replacement = 0 +``` +Eventgen will replace the matched token with an increasing integer id starting with 0. In this case it will generate 1000 events with `FILM_ID` values from 0 to 999. + +``` +token.1.token = "REGION_ID":(\d+) +token.1.replacementType = seqfile +token.1.replacement = $SPLUNK_HOME/etc/apps/sample_conf/samples/count10.txt +``` +Eventgen will replace the matched token with values from the file `count10.txt` located in the `samples` folder. + +Extract and place the `sample_bundle` under the `$SPLUNK_HOME/etc/apps` folder, and enable the `SA-Eventgen` modular input in Splunk. Search with `index=main sourcetype=json source=film.json` and check the results.
+ +### More Complicated Configuration + +``` +[sample.tutorial0] +mode = replay +timeMultiple = 2 + +outputMode = httpevent +httpeventServers = {"servers": [{"protocol": "https", "port": "8088", "key": "00000000-0000-0000-0000-000000000000", "address": "localhost"}]} +end = 1 +index = main +sourcetype = httpevent + + +token.0.token = \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} +token.0.replacementType = replaytimestamp +token.0.replacement = %Y-%m-%d %H:%M:%S + +token.1.token = @@integer +token.1.replacementType = random +token.1.replacement = integer[0:10] +``` + +``` +[sample.tutorial0] +``` +This is the stanza name and the name of the sample file in Eventgen or your bundle that you want to read from. You can also specify a regex here to match multiple files of similar extensions/naming conventions. + +``` +mode = replay +``` +Specify replay mode. This will leak out events at the same timing as they appear in the file (with intervals between events like they occurred in the source file). Default mode is `sample`, so this is required for replay mode. + +``` +timeMultiple = 2 ``` This will slow down the replay by a factor of 2 by multiplying all time intervals between events by 2. For example, let's assume that you have 3 events generated like below: -12:05:04 helloworld + +``` +12:05:04 helloworld1 12:05:06 helloworld2 12:05:09 helloworld3 +``` -Applying timeMultiple=2 would instead generate 3 events like below: -12:05:04 helloworld +Applying `timeMultiple=2` would instead generate 3 events like below: +``` +12:05:04 helloworld1 12:05:08 helloworld2 12:05:14 helloworld3 +``` ``` - backfill = -15m +outputMode = httpevent ``` -Eventgen will startup and immediately fill in the last 15 minutes worth of events from this file. This is in Splunk relative time notation, and can be any valid relative time specifier (**NOTE:** the longer you set this, the longer it will take to get started). 
- +There are various `outputMode` available (see the [spec](REFERENCE.md#eventgenconfspec)). The `httpevent` mode will output via the Splunk [HEC](http://dev.splunk.com/view/event-collector/SP-CAAAE6M) endpoint straight into Splunk. ``` - backfillSearch = index=main sourcetype=splunkd +httpeventServers = {"servers": [{"protocol": "https", "port": "8088", "key": "00000000-0000-0000-0000-000000000000", "address": "localhost"}]} ``` -A search to run to find the last events generated for this stanza. If this returns any results inside the backfill time window, eventgen will shorten the time window to start at the time of the last event it saw (**NOTE:** this only works with outputMode=splunkstream) +This is the Splunk destination server to receive the generated events. Change the detail information in your environment. Please refer [HEC](http://dev.splunk.com/view/event-collector/SP-CAAAE6M) for more detail. ``` - outputMode = splunkstream +end = 1 ``` -There are various outputModes available (see the [spec](REFERENCE.md#eventgenconfspec)). The splunkstream mode will output via the Splunk [receivers/stream](http://docs.splunk.com/Documentation/Splunk/latest/RESTAPI/RESTinput#receivers.2Fstream) endpoint straight into Splunk. This allows us to specify things like index, host, source and sourcetype to Splunk at index time. In this case, we're getting those values directly from our sample rather than specifying them here in eventgen.conf. +Generate one time for the sample events. ``` - splunkHost = localhost - splunkUser = admin - splunkPass = changeme +index = main +sourcetype = httpevent ``` -Parameters for setting up outputMode = splunkstream. This is only required if we want to run Eventgen outside of Splunk. As a Splunk App and running as a scripted input, eventgen will gather this information from Splunk itself. Since we'll be running this from the command line for the tutorial, please customize your username and password in the tutorial. 
-Note: ->When using outputMode=splunkstream for running Eventgen outside of Splunk, use parameter `PYTHONHTTPSVERIFY=0` to ignore the SSL error: `SSLError: [SSL: CERTIFICATE_VERIFY_FAILED]` +Events destination `index` and `sourcetype` in Splunk. ``` - token.0.token = \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3} - token.0.replacementType = replaytimestamp - token.0.replacement = %Y-%m-%d %H:%M:%S,%f +token.0.token = \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} +token.0.replacementType = replaytimestamp +token.0.replacement = %Y-%m-%d %H:%M:%S ``` -All sets of token replacement lines will look very similar, with different regular expressions and strptime formats. This is a replaytimestamp replacement, which will find the timestamp specified by the regular expression and replace it with a current (or relative to the first event) time based on the stprtime format. Generally you'll define a regular expression and a strptime format to match. +This is a `replaytimestamp` replacement, which will find the timestamp specified by the regular expression and replace it with a current (or relative to the first event) time based on the strptime format. Generally you'll define a regular expression and a strptime format to match. For more information see [regular expressions](http://lmgtfy.com/?q=regex) and [strptime](http://lmgtfy.com/?q=strptime). You can also read more about the different replacement types on the [reference page](REFERENCE.md#eventgenconfspec). -That's it, pretty simple! ``` +token.1.token = @@integer +token.1.replacementType = random +token.1.replacement = integer[0:10] ``` +This will replace token `@@integer` with a random integer between 0 and 10. + +Go to $EVENTGEN_HOME and use the following command to generate data via Eventgen: ``` +python -m splunk_eventgen generate splunk_eventgen/README/eventgen.conf.tutorial0 ``` + +That's it, pretty simple! Check more example conf files under `splunk_eventgen/README` folder.
--- -## The Sample File ## +## The Sample File Sample files are seed files that Eventgen uses to send data. When a sample file matches the stanza in an eventgen.conf, it uses those configuration options to write data, using that sample file as a template. This flexible format lets you take real sample logs from anywhere and use it to replay/continuously feed data of the same variety. The use of tokens or regexes allows for dynamically-updated data, which is crucial for mimicking the latest timestamps or meeting specific cardinalities for fields. diff --git a/docs/index.md b/docs/index.md index 1e4dfbfc..12b2e030 100644 --- a/docs/index.md +++ b/docs/index.md @@ -5,9 +5,9 @@ Splunk Event Generator (Eventgen) is a utility that helps users easily build rea **Eventgen features:** * Allows every type of events or transactions to be modeled * Allows users to quickly build robust configuration-based event generators without having to write code -* Can be executed inside of Splunk (relying on a comment event generation framework) as well as outside of Splunk +* Can be executed inside of Splunk (relying on a common event generation framework) as well as outside of Splunk * Event output can easily be directed to a Splunk input (modular inputs, HEC, etc.), a text file, or any REST endpoint in an extensible way -* Easily configurable to make fake data look as real as possible, either by ordering events and token replacements by time of the day or by allowing generators to replay real data replacing current time by generating data exactly at the same time =intervals as the original data +* Easily configurable to make fake data look as real as possible, either by ordering events and token replacements by time of the day or by allowing generators to replay real data replacing current time by generating data exactly at the same time intervals as the original data * For scenarios in which simple token replacements do not work, developers can quickly build sophisticated event
generators by writing a generator plugin module while re-using the rest of the framework ## Table of Contents diff --git a/splunk_eventgen/README/eventgen.conf.tutorial0 b/splunk_eventgen/README/eventgen.conf.tutorial0 new file mode 100644 index 00000000..a059a63e --- /dev/null +++ b/splunk_eventgen/README/eventgen.conf.tutorial0 @@ -0,0 +1,18 @@ +[sample.tutorial0] +mode = replay +timeMultiple = 2 + +outputMode = httpevent +httpeventServers = {"servers": [{"protocol": "https", "port": "8088", "key": "00000000-0000-0000-0000-000000000000", "address": "localhost"}]} +end = 1 +index = main +sourcetype = httpevent + + +token.0.token = \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} +token.0.replacementType = replaytimestamp +token.0.replacement = %Y-%m-%d %H:%M:%S + +token.1.token = @@integer +token.1.replacementType = random +token.1.replacement = integer[0:10] diff --git a/splunk_eventgen/README/eventgen.conf.tutorial1 b/splunk_eventgen/README/eventgen.conf.tutorial1 index 7d6adef6..282d978e 100644 --- a/splunk_eventgen/README/eventgen.conf.tutorial1 +++ b/splunk_eventgen/README/eventgen.conf.tutorial1 @@ -2,17 +2,7 @@ mode = replay sampletype = csv timeMultiple = 2 -#backfill = -15m -#backfillSearch = index=main sourcetype=splunkd - outputMode = stdout -#outputMode = splunkstream -#splunkHost = localhost -#splunkUser = admin -#splunkPass = changeme - -# outputMode = file -# fileName = /tmp/internal.log token.0.token = \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3,6} token.0.replacementType = timestamp diff --git a/splunk_eventgen/README/eventgen.conf.tutorial2 b/splunk_eventgen/README/eventgen.conf.tutorial2 index 3f0a782b..9624cc9d 100644 --- a/splunk_eventgen/README/eventgen.conf.tutorial2 +++ b/splunk_eventgen/README/eventgen.conf.tutorial2 @@ -8,8 +8,7 @@ dayOfWeekRate = { "0": 0.7, "1": 0.7, "2": 0.7, "3": 0.5, "4": 0.5, "5": 1.0, "6 randomizeCount = 0.2 randomizeEvents = true -outputMode = stdout -# outputMode = file +outputMode = file fileName = /tmp/ciscosample.log ## 
Replace timestamp Feb 4 07:52:53 diff --git a/splunk_eventgen/README/eventgen.conf.tutorial3 b/splunk_eventgen/README/eventgen.conf.tutorial3 index 739d84a4..03005485 100644 --- a/splunk_eventgen/README/eventgen.conf.tutorial3 +++ b/splunk_eventgen/README/eventgen.conf.tutorial3 @@ -5,46 +5,23 @@ interval = 1 earliest = -1s latest = now -count = 100000 +count = 10000 hourOfDayRate = { "0": 0.30, "1": 0.10, "2": 0.05, "3": 0.10, "4": 0.15, "5": 0.25, "6": 0.35, "7": 0.50, "8": 0.60, "9": 0.65, "10": 0.70, "11": 0.75, "12": 0.77, "13": 0.80, "14": 0.82, "15": 0.85, "16": 0.87, "17": 0.90, "18": 0.95, "19": 1.0, "20": 0.85, "21": 0.70, "22": 0.60, "23": 0.45 } dayOfWeekRate = { "0": 0.55, "1": 0.97, "2": 0.95, "3": 0.90, "4": 0.97, "5": 1.0, "6": 0.99 } randomizeCount = 0.2 -backfill = -1h -backfillSearch = sourcetype=be_log - -# outputMode = spool -# spoolDir = $SPLUNK_HOME/var/spool/splunk -# spoolFile = - -# outputMode = file -# fileName = /tmp/lotsofevents.log - -# outputMode = splunkstream outputMode = stdout -index=main -host=host1.foobar.com -source=/var/log/be/event.log -sourcetype=be_log - -# Host/User/pass only necessary if running outside of splunk! 
-splunkHost = host3.foobar.com -splunkPort = 10089 -#splunkHost = localhost -splunkUser = admin -splunkPass = changeme - token.0.token = \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3} token.0.replacementType = timestamp token.0.replacement = %Y-%m-%d %H:%M:%S token.1.token = transType=(\w+) -token.1.replacementType = random -token.1.replacement = list[ "New", "New", "Change", "Change", "Change", "Delete" ] +token.1.replacementType = file +token.1.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/orderType.sample token.2.token = transID=(\d+) token.2.replacementType = integerid -token.2.replacement = 100000 +token.2.replacement = 10000 token.3.token = transGUID=([0-9a-fA-F]+) token.3.replacementType = random @@ -52,23 +29,19 @@ token.3.replacement = guid token.4.token = userName=(\w+) token.4.replacementType = file -#token.4.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/userName.sample -token.4.replacement = /Users/csharp/local/projects/eventgen/samples/userName.sample +token.4.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/userName.sample token.5.token = city="(\w+)" token.5.replacementType = mvfile -#token.5.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/markets.sample:2 -token.5.replacement = /Users/csharp/local/projects/eventgen/samples/markets.sample:2 +token.5.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/markets.sample:2 token.6.token = state=(\w+) token.6.replacementType = mvfile -#token.6.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/markets.sample:3 -token.6.replacement = /Users/csharp/local/projects/eventgen/samples/markets.sample:3 +token.6.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/markets.sample:3 token.7.token = zip=(\d+) token.7.replacementType = mvfile -#token.7.replacement = $SPLUNK_HOME/etc/apps/SA-Eventgen/samples/markets.sample:1 -token.7.replacement = /Users/csharp/local/projects/eventgen/samples/markets.sample:1 +token.7.replacement = 
$SPLUNK_HOME/etc/apps/SA-Eventgen/samples/markets.sample:1 token.8.token = value=(\d+) token.8.replacementType = random diff --git a/splunk_eventgen/README/eventgen.conf.tutorial4 b/splunk_eventgen/README/eventgen.conf.tutorial4 index 9e93cad5..6a98d63c 100644 --- a/splunk_eventgen/README/eventgen.conf.tutorial4 +++ b/splunk_eventgen/README/eventgen.conf.tutorial4 @@ -12,9 +12,6 @@ hourOfDayRate = { "0": 0.30, "1": 0.10, "2": 0.05, "3": 0.10, "4": 0.15, "5": 0. dayOfWeekRate = { "0": 0.97, "1": 0.95, "2": 0.90, "3": 0.97, "4": 1.0, "5": 0.99, "6": 0.55 } randomizeCount = 0.2 -# outputMode = file -# fileName = /tmp/mobilemusic.log - outputMode = splunkstream # Host/User/pass only necessary if running outside of splunk! @@ -22,12 +19,6 @@ splunkHost = localhost splunkUser = admin splunkPass = changeme -# outputMode = stormstream -# projectID = -# accessToken = -# source = eventgen -# sourcetype = business_event - token.0.token = ((\w+\s+\d+\s+\d{2}:\d{2}:\d{2}:\d{3})|(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}:\d{3})) token.0.replacementType = replaytimestamp token.0.replacement = ["%b %d %H:%M:%S:%f", "%Y-%m-%d %H:%M:%S:%f"] diff --git a/splunk_eventgen/samples/sample.tutorial0 b/splunk_eventgen/samples/sample.tutorial0 new file mode 100644 index 00000000..b906dc98 --- /dev/null +++ b/splunk_eventgen/samples/sample.tutorial0 @@ -0,0 +1,12 @@ +2014-01-04 20:00:00 WINDBAG Event 1 of 12 randint @@integer +2014-01-04 20:00:01 WINDBAG Event 2 of 12 randint @@integer +2014-01-04 20:00:02 WINDBAG Event 3 of 12 randint @@integer +2014-01-04 20:00:03 WINDBAG Event 4 of 12 randint @@integer +2014-01-04 20:00:03 WINDBAG Event 5 of 12 randint @@integer +2014-01-04 20:00:04 WINDBAG Event 6 of 12 randint @@integer +2014-01-04 20:00:05 WINDBAG Event 7 of 12 randint @@integer +2014-01-04 20:00:06 WINDBAG Event 8 of 12 randint @@integer +2014-01-04 20:00:08 WINDBAG Event 9 of 12 randint @@integer +2014-01-04 20:00:20 WINDBAG Event 10 of 12 randint @@integer +2014-01-04 20:00:21 WINDBAG 
Event 11 of 12 randint @@integer +2014-01-04 20:00:21 WINDBAG Event 12 of 12 randint @@integer diff --git a/tests/sample_bundle.zip b/tests/sample_bundle.zip new file mode 100644 index 0000000000000000000000000000000000000000..f33c83cbe8ef8307e4c4bb9ca426400424e6c19e GIT binary patch literal 41702 zcmZ^qV~{A#maf~jZQHhO+qP}nw%xmJbGL1Cw{6}1&6#`R&YhX6$cm~T>#fKq-^^H< zS*0Ki3<3r4&vEQ2C;uNe|9OD}Kmc$yv~{pC)i-jrGqEwHSJi+4Xe21n_4`|%08s!G z{>GvJK>R&HrKtW(Ec$45=iA0L5HBEFlH3RtBCI*N$N-p)vH}XZ^VXS%ZfEiqLY5>9sgjwV; zN{%uTVXnQiTBUHHE~~$JX6RWv#ezcFGz^U~{3vMQS8a@W<81b$Ei;gWAP7Zx8=y|> z;eTKXMUaF{Lfo5cPL_q8*7(8Dr>Q}h@$d82 zk(%=wPw1kJpf7yay0Q?8Je^CeY`mfX>|B}q>Fxq*?vt*ngNExcNULI6+#1~54SHedrbLhvi9 zz!ZS;K0$u~Fh^Jjh+rlvgYKVy0Dwb4NAz>B{srRkxBO$Jf7R?$Q}Vx93GKgG$;8ym z(ACD}-*he%a{o=|Uln`%|Ex^>ziODencBISo7&MC+uND_W6g@csLTEnw{3#F{15}o z$jvv(;xHgfrEGn2dxPM}cLAf^rb_5&fxuzNXyRyFepl?3fc%~J>>KIx$q*aeM^v7h z!S6jk2Y!bQq$NJYXo?i2y>7wEVz)v;0VZ7lt_Jf9hQ`+ehirr*dE3}e>`rLhm&=Gw z9?<@8Y)a+zyA%r!TJwi0AB;HrHZvFZ-`X&-b@POG?;*{@uaUK^zPw4*{2q>>^Fb*Z9bQ^9<18L(jQn)J5)fZt!2bas z;Ig*5^cT7r;QtH$|L_0*9sIw1<@|5(|9u=#cl@tz#{X~N|F4FbrHw6}m9xFwKhTT+ z0yjham&ccSww;dHTIoFa5g(9xvm)1gAzW9G`e%|e*qwe`7 zzPJ1O-r8=Q1|3!u%D!{=aSK!5+rn5tzpr=uCLNXy(vqWADol6pelY8p#o%Cnw7yp@ zoZUaJ-oDfA^&5BX^6mKfcX^j*cm2K(@6tElH*ey{%Dd`k^{?~hhGyH|wuc_RR!{xE zo?k!O>Ss5-@5JN#cKA)&^soIreZ1QHzT|jB0u^I_4nq(>u6K8PJ)NKIcTp=gS}el5 z#JgVGZYYSkl?o5|Yx-WVe>`5Fk0*cDedDY4{j=L(p5)*BUYWniKZSL_+v4%x+D4Y^ z)*o(OziVGVgaes$?&&|`V^&~)4%@cXBX1P1UfcLDU)$>7Ify?$Y#6Py!mPhf-?r1c zPP_6iX5Hc4_4|@$Rq5eP5pY^t*pNfO!2h&=9DWye{ybevD*Fl3c)di$-Ud*6V6pKb!Nf>ttAkKN#X`F-%;;QQ6)<97io z?0o0&enUUK#Q2=9zUcQ>Fx=||fyL`+I*5*1y!x5&_KR$IAoqlk$+k=ij{H zrri~HuSt5GiX7LXrrzuZC_3H)1_vkce(&fHynG5tIq~AY-cdlzZnAUy)<^-X_+pS644`XaBN?}>r`diqYz&-L_hmE`DTt$j)rj65|~P)#H0V)ZSd-MGn~ ze0K7z?&!PtIN+8J7%_h(YC_a$#dpU_9wHSi`7oiXK{z`t9TU@be-!g zFRNLUCIt)zF2iJ$GF%D5=ST5tXfLk25BT?(yDyI0-*<1775DYMKi-`^=eI@YMxLb| z0&AT1`kIUH=Mac+VUq}RYblQMhtywjZ1eGIq}FFkydx6=ku8Xa>}AZ|FZ_i@|4X(J$edcQ?gQ%Sax 
ziz+s+KXC|v(s%dXrhi`jmbl9v+oT)f-p3%Q*7x`By=sz3KX2L$*VY5&|1~!I^XKLO z1!95}M&I|RJk+XkPDNr*vkT}r4{28{N&@E8 zi!H)s`zTR?r5tFeU2|^>zP{Pp)x*`UXMB*~_t*EnUPPly@>$5Wy>2En-dz*;NpND6VCDWb`#Ccfy%@+rK8d$_U%q51h^Iw zP68he2Y6HO@&-1C<93e~CL+fNye-U+%+C>_w-&I9=N+8f_e~JiXUYo(v8!@XXGwFF zCndmRW?peq&qX~WBIU3>v!02+vW4mM>uvt7N5t?lc|qf5_^!v0^M}J8{&m}!z5b0+ zssAZXPdMUnbdxwWB$24ol!wVHdc5%P$9On+o{N}702lE#=<2pPlh9M!Y`4KHcRjF2 z!1>?7sLy+6o0sn^3r0GuzCGSBRWk=bC6gi*+v(_O4Um~fl~UT|(qU456o1{`z3(yL zIJkH074t0=KkKxRXJ?DowplYt;~Qc}!fq#{UsuWKwS^t* zEot`Juea-CJK+AT?X>UVGMU0sXJBqEyy;>Gqn79=?Spp66K=WxOGD2n+006T1*OX# zztIcGEy`Dlum9<0(=`lW5!h;Y>MjfbSlpHFGLF~V>+|!Kng4g;uhqe}6_}M{%96To zq*b-Rb=T=#U^;;Q_P1K;efYj}&0o=F7|f}Jkq=1jcNy)p5y^w~0u;wYFjAVm{OAV1 z6B{prF~*Q1kOlF2cP4S{a$yM!D^7%G{rY@5ykDQ{>dn#PhGcqgQ1h|!SQkM>c1#p_We);$ny|@)?LVE>lz2Znuv&!0eBMN(9Gr86@Zg)@qLH# z+xPKarVoQ`H|?|R-L7*Wx5wt^D2`&s+#l&foRtMp31Vo_x)^w|N&QU<-q<<4v_>Sp zfT!$ynpSc_zH_*>jqyqn$ftIGkK8 zI^~-mDvRS9_M2CGlmk$)=M$uby0mOA2h`F_ecZ+xrSiPT{) zLjoV4&#pax&)9d~vVJrSIta4oTpjIs?EZEKan>ZPKP2ZCno_s^dE8SULJ$PN_qxmsMM75$0J#1?W?oT zXOI0V?)eo1t@;E2w76*}!Z+Tq&GuA-94HxHvR&tVJ*)n1b{&y zj`0D@(7O*dNOgD!g`9%Kc0o6Z=XM&Y3fPCLEE(5tzCI1)_}3^bI8)q{)NW16W~IUy zwT{<#+B)lR)c#1u5-gV}#-anLXs9{q#@&S~3m*{cI)=N_3E)+L5*{5$L4!(y=qZx=?+@R?m0&@iGv)N26BZ2+JG;iI}D_*Y*CX+ z(zdKL-RJCiRiEji{zl`|&q9eqeFLU2B{poqX0KdB4T*KtX7Q6n zJz;g!zYQ2>;*&@(NG66g9f=pEM@D#g7qLO`gPR%DN>##!eMdJsEiEFkpcD~px7)ur z-t!64ir8L!!^sAs!Y3aC-wNwhH6t9?bF+B;JF&KJWkLd2od*OJok-oT8Rg~WrC7+X zsR_G=_KKt3Y7;K~1lI9RkHF9<-d4AFJ=lL{KgijU*9nt8-P+jPGMpyq&Zo9)88|+L za`RCfi!g5t;aGRjx|i$ST#{UC<&u_6Tnc`DUZ->WjgeCAn>{ko?=9>dbSF1|@{}4c zvSJ9AW^>lH#l^P7z|j(UxO+}~gmde)d?8(Za#Wu3Mi6zCKIt30pfNTRQj~W&1w~b9 ziXUIiHeNNW_#Gi)^&f@C(v}hW!J|JN<~wxIq6XRM&kyC@OECDwF-%hYP&O^4N1`0T zM1!~fnofzK|6|Ku8Q3EV>aHEY>;+u__#VhM#yUpWGz|QwO1WZ^9}nn{~u#8H}BQw+|i0HxzXh z;d8QL(Oyc5X-7V|XeH{F^P@m{%X0>7QMGuW;g<@Rdrq38JES=46a$vf{=5sAmV~{P z0#~x8PD*4$$@Ryk&j!xX1K_Mo#zt~1!{lZTKnn#o-J22jobi>#UMK+I?ygFK08X!W 
zw?M{>v6mp$E{VH+-QHINR6aI@2kqiiu>_hb>v}l@+D%L2Nsx^@Hvzu>h&5=f7m7QJ z?A974v3O+Oh#FgGdGHF3+MZa`5-^jn(ibqi$ez&)DnF~;f@#ZmC_o0VKgg0Byd1}9Fl|WXgaS|t^*Fh0+*)0+vc0%f&W;P0X z#nfU+k9iVW%iH0BU{Y(!>zz-T*yF$}{doj2s0r2?a9Xn;OiYpmo`U)@sx1ItW#O2- z!f&a}QS3Z{3WX!8J4D94`-N^F<^3gOT<$yf&iG1kN@lTNg3Y%je9%NfF$;|)>#Y7c zuz^J-rj53k&WrZLAYr*-qw(@!cQaRIRVkct+tQFoD+ zK5(dzc$8tr_rIeJ3GX>b<7^v~a3U`e75mI%Fq2x&ZfXm@x|v&IjF8{iBJO^MLSJWK z4#|L9T&4slrIH{+&mKxzA4-5f7b5*TnSxbwb(!>qZl$~5X-i@065q z1(?)y2%$P;Q!=1V;ym^vm#JxJ(4)j;nZBI@dZ1LHvwe!d$J50Pv>mgPG4ISUeqhtv zq8&gDQ?6bQ`eKn8{5kXB04*migThm*V}mLA$Y9G$SJq*Hce3G!WRh^gS~;&WJY|v+>q9tMjSOM z(+>@z5MnS`A6kZiMc8P5S>1&4vhrwvu9o0r{R`z95Zg(Xs&OAP7f(*kW=R)GV={G;X;#7T(N%;q9!9AJgzj?KR8( z+&U(r_LA4b4F!X~satwICw|$&8GEKW1Ss&rVsl4j-~j4K@b{)bT~dHI2BT6VNL&vN(( z`}nVe8g1)B^qJ@?sMV)BP^6G^Mln9-)=sz8oN6no+OZ0HbAO$S8}8CK0;r?Ox$~#& z`wc9tvvK}3WR9P%ze|Jp_V{o-GJT~@>2G*)EA^TzF~})st0h%$52gaE<{qpX=?OsF zxvW*!4*hgX)rCSKZ>{wXMiP>N&VB&|pePAawpK9KVG{sJM_Hc;eN|5OOzraiZCp7~ zP=$}Yv=s_&87M$l5z8mdb0vi?#8e$LxWpifumxjSY&P`dLY&fPaeF)0=ASFD!7+-0 z)&(kLjsuD&SkpGhj&fTKggL5%Vv2Xkkfo|}P1Y^rV7T#d4z^TW{X;ISL@8+piA9XH zS1-Ia_z2f|IOJe8RqzMOAJRI)j*9NyY^&nuzekW5awXyM_ye3pGmVDYt>m~JFanFz zf76_2U@!~r4Pu#{I-Zp-k`E>G?Bv0plvPx30PPZ_rMn#V{E7j^P5dP;QgeB?pI$Eh zO<1!-0*K2;LT4UjO8>M{&9i9?GI%!RF(iR9a6*Q~-5k_#iDMwm< z2S7cE8@_GIZ54$bQJB4b6Zw&Y))Hp8K^9~GPzj(~|8H98I~(xJu=ISr za~~QMp!kCJr}gh#?u!&;Nm43!sz^T5+Hz&8D_>>&#A{t*3Xy*wDA#plYm}^;`4wn?_;{aO%t@vytS=A^B1Yp^A_}GO@-oE8ui4!e z_~q@sKgjV@Lq<3WXLb9$-N|TTfRYfIO*2-V6CRMz+;W?FM$c6R##C&rxpCdAH!4zr zJ_GAbzm~G=-43+4Aej-81Tn&WUXsF!LlF$IpW%X4C$pgY-ZE3Z=on1NJ#SL<(95Ix ztftCjpq6fDTw+?aff+Ds;(Xwk?Sa?jrRi`6J z?Q6Nrzj+{`*)(fGDH1zkEF=Q|QnpY=#TKgxl^6p>`+oIy^SL4A48s$1^1#&_y5pC6 zC9QO@j8gHqBc*r;&HsBJh(ooQq^ADQ_E~3a2oa3r&AI|C-+5_pkmb zoI?W>3*oBTUw9aO4-Ek&MrLVcl$|-@a6!Y0R4bOfnc_G{vk8QH6i}Xi2TzJ_Uy!E( zM9tzH_0|MONzu=hH)F_3b+l0tM~|2u@bD7q)k{f{vW5es_SLlj#^e|v!P5a|TxTcK zAiQ1@JawQ2+|*E7=zQ8Gv@@it3ACoDg4;wpA_6Aw&kLL~Q8gma1NE{^tY`hlw`^@mkOm6KIR+>f{nKRfbd#)O~TsD-;@)u7NL9# 
zehrMH4aVUM(7GPCjKoO*tHX3IsR(=19^7)nFWFl3cn=)1l`EBCBN!8P>(P8u0nV%o zokSL(b?z{@23*?kma(M5U7nmwOG7op#<5~amSLTH^f_UA22Hrc4=UtZ%dnwKp|SOOniJ$!y(;y8=}mq1`F1>`e7y?%*=98OdZIw zMi%1Lv?W4CDc(FEKQp6(_T^+B6RsQr-eCIsjaSh#xNjfzl73uy7Lo*f!?*8a3}*a` zOreZ5$c>*B)}8e)Q(i+24Jhgw(NST0Sn8OS0M>#%P(+F{)AYE}q>&xQ_R+~3%j->i z*k*_=xAp$X^HMdt0vQcEoXm}3S=7(7#q9FqHh2xyL;iYvC$ZN_m;GxJ?BT5fV$@g+}e|t5L z4q%|mn0}bWK40Ns*-Q45IrQALBM<Ku<1ig)~(ZmLzSL0IP6K z@cb47|Ay%eb(VeNp4P+VG+E8>BprB9zWm9;fGZ%DQIZW?BMrk!ZzLSuX`sU7MO@gvxR9J!&!TlCp zflM8IcnRW7ItJNJYe|8aY#0j-J~>1fy@x9kQJac+&yIvy!Ya)OD;9+0NG>T}>TrZ| zjVGdyD~S#~UQB^IIEY?O6K)x;uI6rFEJU~rMCaT78KCu$v1?1!fwDzcGYcUDPg&u( z1cSFbDCR{wN@=IL%`Y4@LMp!-4OvL~B#N*z`ryrCwN($JWf3F*Ka#9Xjv3l)vlHt&K;p8nTITDPd zLAsV!*)afeK~rNOkuiSxOb^c$|52JZXI%)#V{YO%zG7Z6 zgR@f$86&X-9sQ%B!=5^qkeT&19Z9A6(e^cyY}0wD)<`I}a=mAtT0PIh#KF%fMgHhh zGtf?k`fTNkHi5T0?tT)W1G=n6FYbvR{kw%hk7pjLqG{S3*lk{n4+LLV}4}B(n(inI!#h7AV`?je! zXLYrWXi9l%i@e3ZF)(btN7e$T7R#S19!ffN{W4TtzdM!g2Rya$47j?C!R(bbu3Xpc zDkY&uM7wLs(5?0gYbMVntH>tcr$(9Q68I)}0Fz~7uC)bt4Yfj%4@a=u1GM5T@|DP_ z1(kv%kkv+4L=YHEA%bQFLgSj%IHwg}Dh5L8@5|d0-V+SeT3uzKl>nLiq+X;BI)Y=P z+{muzNGuXlo4E~$@2(7is&y`rOFj{fjywJygysF#OTYn_{}qsCe`4pc|r zpY>PpQnfbQwt}Gd=g4nKd(|9~5_D?sZdY0F#?sf5ovg^uM7D=W5)gAnFR#k;D1~mp zfy>1>=`fz_affTv<|Cad2S}h8_;xm<(cvHs73C3fnt8xb(VF3od+9<1uY!^*4 zujZmSPdClxK=S5%0w7SHg z>mQQ{gPT6{IdY!i4JA}LF$@}Fueeb~I0;YFW)JYR!V4lw=J9fsH{%=vJE&&L2(O2J zpyFooK$P`uBkN(+OCr%%vcYH)Q6WA&69P`rT^LF7#fLe9cP6X3>ZmVN1 z{!S7=o0D=&!LQ(vZnC(>BgNQ)YO;!SG|Ue}@pdEP+yK5{HA)s(>68P8BY|*b;@XbB ze4hNGkC%u4Jv_WKsPQ{|&yB}Ra@k;+?0w8%&fczmdCkmiC@p-zPnss~<~d66_~zY2 z`$Zxnv_cfP$i;ot}ce+B?H-J`aP=)CrC+X0&l^nGHwV{oT zS2{?Jr-aU%JOeQo@R1DU4EJivOIbQ)mHtQ&J1hhWIAIN3uBf{>hHog9Tn$wq9K+scQn)`$mfl9y9>b>7Otq~wBl4tqvr-sUs|-nu$La%S8XRUiGYA6_ z30#c5eItq%*aD?!f;N?fC0W;fKMZtUI3{=fOz#uv8UQ#Q#25$kyEl5VF0=y1>bX$I zikDeH+g)VF9F>x#5nmmF+(lPJ2QhdNG1|If=awBrN9@_F?DM#drvuL)7m|`sox^Fx)~o2b1z>U}BzZDz?_VF$_0vz>zE zrM3>z3})ap!W?#5ZvDOyiCu;0K?}Syl$}L!M4qT?;Y!A?(iQY=~=EL9Y+?TLMT;SdIyG*`5QUy*o(3GNr;mB8n^N 
z!h2Xy6KR7UbApsh{%JuQZhiSpopO~2eL+R~UTaY{?0$ZNLBv>{neUs&kyX^AM7OcV zWjy@u41vN;FpmW=;&bRhLd#QHB96qi+Ab~YCW`35eQtW-9MMbkjnRxms4iG0NbM|} zwLG+rJw_Fk-V+eGuyKo;Bq&VYWu(pDMYwU|Gqlg`Gh~@y?A+LZdF7J1TVbb-LZthz zWuX+kaLtQhiLz8n>ShX76E3t}@aa+Y%KPadrMW|L8TbfUrVa7tL__eg7BY;D=U&vv zU4qW)Y{6>q%0y(m@WsRh9M!+O63w}f+Y3t~en0?3MJbnOSA=P`EoJOLybSLhgGP!p zp;5|l*}P>1HmSk*T9Z?3795}N)fqJLI5RK z<_?V5ECcmT^n15>KfTy(GR4mU%ZpLS7~Sxc2}LWSe3WmkVNN-XKqXp(h5=z3lHhFd z&*^ldNH-nboW88pa*W@{7K0jrZ?+WUpugoIv~b9p%RX*sIf|}SOOHDRiROlFm4Z~5 zR+N6snnjEZ_}KGO4HG=KB^{iR5jdde4@2pmPpWO=F>4(|rRyn3Eh-Pj zL-{^!@PaVf=2?WCK!HijgSry^e%ex4S*@)D@NH#5r;jyz3d}C_DFhGoj`<2fBAFip zGHtEh)XPjMOL~3Rv49$U79Z@qIH;O<#=T=&edGF}PwyRo?1up&!E5f~!M}qCRucMT zc?*+BlS5>|X0mxHaeU;Et71bLymjJ+CRnJ!i!VD%ST=#rOcCfAQ(2Z0w)Y9746GH@ zqihy&%+kMvL`x-JG+U0vLw;m+i*zS{*p)2~1q9Bj^=Qe7XU8*mdTMO9U3lvD@ECPO zh}8fjX7J;GY?N3twh>^Oul9h-mrj@;B-EW@9VV%8%=!BlH_|OlXwZNSwe#+V6$L!H zygtG{)PBy(r}Mnht_#$W)`#X%HG?|T_lE|a0?3x()DPNO%JH>e8ctN88VKnyn-ceZ z+|6hN=S_iGr|n)5C@!(U_{E?kM*3n*Y!?|+fF_k%6FY+BP|S!f=ytOEl^pL*yXwr@ zd|U=!$?h(y=`fn9Bu6Rymy&`|9mMCI;36;)y8sbOP(rF#xAKwa!po)yeLvlOBfm_Y$ zcBbQT8B+glj&{LgmCi;wo)*62g5-sV>~x;S{|D zFpf#!rK=pfOr6w)pi~Gu%igwosK<|f1RO~wrrVqh>9_BjlWtW>OJ>b=pAJ2AZ{#XK;va_%JSCTEYq#qMo^houZ<`d$jmxsFt9ks6#F< zl1q_Ht0?Gnpk^YehQ*c6)8U8lEf~DdJ9Esx$KqxQ0xjVcxKc*+Yig}??!m-D#l5tw zVKnvFKVwrNn7%{gQ+jd_^8dIy8$n)cds=;58R8??Lk3UO1=$Ubhl)f;pDd|qPq-a{ zRgs3df@5?#Hn!ybCbqUtIpnQn5a@ZJ`OMOYH9e_C$xpFVI$E$b zYX!}RZgY_aBWzNh#`h&&`XfNq05=LTr7ndCP}S|~`bUib^ahhD?2r*LJm$(!7;)0` zw~j3IkT$u98EE<8r6-LW!-|X0&Jf15SB!I08f%0KJIPqHAb34&1uC>N=Zp!eBqyD% z8)C2~%OSw~b<6g#m4s(`%^s&|w3HG+26Yy$Vf<0}ddbd&Tyy{NQm6W_@%v?|$y%Jm_hI}L ze+RG*3~Z~lFRCb8e-1v3&f0iDGI?-dxr=(luqfoDqDwwKU3^ErO7PON9o-K`v|q{_ zpbNhA2N3>px#Hb<|J;UwFTwz&7Jr7x7tJIqmZ|3gtYQnUA-;-INHe- z8yzCWOjABM42U`!9Nqzq&X>lI*pDVP;hLlu!TL333p_QVhrHlEhU7|X{4>}53 zZH^r*cA)inj)QR01V9glu`5a4B^}a(TrWkxdxZ#&O&>TT7A-PZWhsX_?ym%B!-Az5 z!7HS&)N(|p^`|ovcyX`aw$hO&yD5SBSDC=jnl% zz9`0YROIo!0P+TU4H>%UwKe&TUJ;IZ5v~N4lXS_1SfJk+py@G5_%rfDTE=V31}OlT 
z9wpA$wE)!x227SEoVVm7GSW{zt;-6Zlrf(|f#(A!fL$pL0oYX}>#G&CHBcq?xaK_z zedTrBMJ3#^RYMKiHSPp}J~d3R@1nBRZ}6?K7QiXWcAh}Fv+&Px$j~$cAMvd!CV(eN z$zX*+!@NX}p;PcW7&}aEKT!aAA)uHdM1wMveebsao2LCt`5LXOQlB`)5q*T9kxzmT z+8)bOSbdbJ2^BU6B9>eNqqgQou$kBz^k~P*R=EM8&hV==fkKB_hPKs`;sh*wu^zfJ z%c2rVbXg`Q1CJ_a@oBuHCF;QpQgBKmZHNq4&^cQVNhMBVmt^r~FU3`mVFr?JE=rOAVjS;Mf^7p~ps^_QQ`)$4FgF!DQmy1*W#8(S zoT=gwU!f@mq(OuDgPKO!DsF%>%nd6VSWzPWYvg zcYtkRV`( ztLr>k2-IM_t-|a;jc2~pH>i-X`bNS2_HhkcL*tPus7qfi)|f29T1!n6fL0%2R_W}6 zOA{Ub)|RBs04{OcKuy?;hLRtYH@6nal=&@;iN z2@0=N$3DXa)YJZF2#Qz59ZVBRG)pUi^Py0G^m@%YFQ zYn_A(CLh!91`>;$n^eAI;Gzc5I2VI*&a#i?h;fsjAmO9d2 z#RXF>wuZUf7^{xDSBvKA%5tWMR+5x#!q^ z*gp*wI#Hynts=re1tp0G%KUf>YYYA^PFznOinkAjxHVVyKiE=lPLU2*b_r`>t>2xW zZT!>(rRAPWp+$>9BjKx`*AQ zWs%vjAPyXW6Jd-*2nO4@PkzoUxlU~A@j`rUQ2^MG4UGTP1fYb2U6@qtQU$q)b-RMA zE@>soP0IAS=hgY}_>BdCZ~Dp^=hsa0+QWTR9IaVVTe%%#lHusz>hF?IRrfq%aWfRTRj zmrN%O7BX_j3A0J8$6-yTEy(N>svcaq+%=3Tt^d28q(hN&^GkJXWbuhJDKdNRy4o!R z_Uc0GLt3ud6FlvGO}K`=-W$i-UK&nUm#@*Nzkg9f>sC5k{aHx5s^kn%{BY}@c%Um6 zMSzs=y2b6+B6o9U!*GKsGa3Xqy5hD@(9m zGrQO}H?hS#+>SQCIt&9}#Nc@A08O!JWR(sNSBb+!v5Bs>G8_3&iHLnWsq0F>6W~~4 zCO(;1lz3Z>vfxI*sE04e5O(hPRr0i3O3b1Mq>ow9A5B~MmnOM&awf*xZCndMs{+b~ zIpmfm0~Ph#QM>X(j;MtLv0u(D3^1_zIr(NpE<`bSai5zFw8aSD5Xm91spR5#rTFw# zs1L^%^&JNNOo4`Bhlq{_D`}vWzfgX1v+l7dqRc?EC5nv~Oz8bbFEGqsbVZtejM#cS;I>OSJ9PM{1r1HX>iWF`2`<+X}2sz zeNM^4T)_bU5@C9Y*kBtAm7vF~Bf>;qjUh0!p2*=ZwV2E9zw(4kjGyzmm;#~Gs_LvU z(w3yze}L^4b%U$U&PfbdcH2(usQ-m|ypc#N5WFUzpaE}_bh;T6G<-N*TCzGvOwSK) zyO*>7tLf98U#tsgHIIiOO)jauBN5~pJWa0o?fG%yZNyX|zgwz^&|f-qEKAoaDpMn@ zDclTN&AmARk2k)5I9G1eLH{x@xJ4}FZj3lJVRAemNVUb@xV3W(N@hZPBJg7Hp~q6< z%s7EqLI|Gin<}eEKF$yU9hb4>-E2zmNk+T=NGRgh>mz1#Q)L9lr)aL*f9zpE74D`{ zw1;WKO3efi0?H*q;+E3U83H|SJ^;kLva9qCt(H7!f^%_-%l%H=S#C?h+2g#@S#Ycx z>*D^55#jhdF#J?SJ`})SKYhoH;4S@_F+1@@b>7MJp6GNMqV1b%R5y5N$GUSjZVJqe zA|PmkDZ07!t4Jhrs2}F@#YXCb-a!ciA)h{P*L$gY8{D;agV&QXQuxEj9f*2xE^uNAl9T55!^BR zirEwHKBX^R6os67-``g!GQ!$Jh&k+bF<-yG)^81x8+v-RSfvKB=X430H$U0!(Xb@* 
zRtwsFZ&;6+6y0Q+0In2gc0iX+ebTKg%r({_$2URmmpb2S#ZdiEb{;^CIAXk*Hs=O^ zcK-fI<;Xk9-1`^`%s3c^e`^0l|M>3w4S-oZhGW4?vyYdHYGd&7a(57x)~2W@FNwR!C5TNV6cDLu4N@EAh~;_jn!V zA$0}vtpbDF$x8ao@r33{^70t7)t=oOsm5V!0ewyGimU~E#4x9st-raeHsNn=ST+cgIzjxy z+o;7!j0WA~tJQpf>`9SZm4I9Ak3HS8tmd-5Y0&%xZZ!Eb7C%{eGII= znUPrL&F4n~;DEy>vX}zevt1Q&Vn&;-vCTbI#<>3aBtF5U#LimtdF%x(3D08iTD8+X zEgi9`25xGezSFmWxD)@G*cq>>evpYkutik3!dN|Vg8uwlkddD_z47`4ampCly)`l+ z^)0*JshRRu!en4-%uzP@QCF&dr9QwuVW?y)@Ka9|5)!)Gmm-D4ijgBm3c+=Y))C~d zcktEALVL^u=AT^Y-%ZWFc^;=i^;s|TDaW6Sjml!x_*y$c0HKLS&Q*x<_aiB$i7g)x zPQ^)Npz-xjoubRPkgH0jK!dC$e(7c3{J=m;+Kkx@6N^hxf6#Y_jOSRnxFBRA8+8DoP#VTTY9`Yk7}9ZNQYnjfm40UlR*(jn_UL$)ZNeOHH`Zc$7p-u zd~dE3J3~@?INv88gcB@*eVN5?y+jh8)B~W3HMm(p@ErFo{+6|Q)K2qri?=UE zYD*wsUP3}oxZ?S-Nx0E_zo68)KPF`b8>7$9+fNm1pJ1=3V9ZM%uEKT$$pQuDy2=$F zQ-rjd=t?cvSaz*U@`Zu&B$Qe&5f0e6B##+}^~joKX!Cllb2&WkAa~qtnAI8fGG~?( zF;QsYL|e@Lvyyz2=Xw5CD>73$aozq?I)?OJWa3Uxdb8d^R0fG@faH(HF2XX5f?djO za$uj7c<;LCjSK7Eh`=)4-^N%Eafh_bf+wId)4a2%EF!Kh-v>xsb{KhRPfW-Wt!t(uN)jcC^qT|PtyviL zV3g&RyeG*v`#`Fcc`9)|4c#5{9ir)ov?*S%V6wR6?aNeq1x1|a5&@pvTYn&Pfkpw7=~QnA*CA3d z%CY*=%X^5k_P@Whai&DPURr8Dpu}fGr7~OYbOjcc%jw43m%WH=qSJkX;wP?B{ia7r z@sfwX8YTfrgJN?U2KdSPFpwrrU}gc0$9Bqi)S8g079jO!(|Ul>i=+*x0hK(_&j)qf zzcz<(C<}4jjnKfU_mM0%YD zJ34jH7M}Gg1Ud*38N&39BP&Z?p^JZAOJE=FcQeJuG>UdPsFcWzRn6dc&sVV(<)|Wp z!d0!SV(L9r(RTzS7LJ^i5G?h0KeNP)dILITX6j{K(8%X9&3mNUV&DU=@Ay$rYG$mgFwfWZgX44 z-*sJ(!vU7!u|P*#7+s@Uk`JU{t#k&Lc8c)O94^kw5D~C^47i{cCtB|}c{`^3{mpE2C{5JW=>-9(RZmGKAMV+0_;lC)(N zRBl2U_7=2ij}l8?g=zSQ1@?cIi9@?14Y|Q3+O^;UiI}tNGRkbp0f{krK1^-5rLU}M zYzsaXE(-i|s_#NEDGE)12+s4CwvsvPRhhn9B~6jz9Y`z4O?(w+gPnX1G`>Ai#ao;J zetVf8m084%#iRE1`EEG{%9#ybkKomsl)QJH+S`;!@konRF1gu7uZDN6MUf~vpfgdp z3M#JBPY-vfKtJ{hGJD}EJGJa)TbG$jB6e6DTqLffjLVvXsVHIFAu&!uiR%qLZV8O1 z8}kp_m2uFXRFl8akx=Ty%U?)y11T6yUzey!uV-?7M4+b0An#)mORPo@kk&9bKg%A0 z8AD|dFh8H`v(#(f)dxm$PpvwdA`S3|wevCX*ax~o+)KxqQFuJ8*Y+DPSZ$={pBZS| zEN}ykg1Y=IO>X7m@Q^~sCy1$j=E`QAP}Cg{H)Cy$prat0Xx+JO9y=A8p-BK(2#I!? 
zT~>6^&ff~!gI%ZOBRsEAA@qnRR%InKL>4`6f2M@FHFGKwhB32y2D_S!hZ5^iH{9{Z z?3{J$hJSqERCs-Lwa${Q$1rHW4RTlUO{(Yga)Z{NUc_`;CP;h!fICK8UO!O)B_C8~ zY=bhH#T^2uaukO&J9@ml2*kmZCQKq zCdjdDedCKA>3yj^DwZSU!?Hgyg)H@=vxEZU=`hW$Lcw_X+7|tHq}X8A9B@H0$Ryk* zf=Y^xdn5n@lz7S*Ge_hC#%ABW{Qg=b4xifHuE1>18xqKKKz~>ZRiDVF09y}L^vVIZv4G~9>w*-7_@X0jJryJm6GB0VksNp| z4*ZFZ{J{XHQMK2v^RzB>oqK5*rN#iDX2izeqwvDrjW|)L_=S`ew1zO*URey#xcI|0 z+m!>(jVi3Xv)~kA1LUTlLbs6Vc7R=$h)WU-@&7)zeqB5} zTqE)DC8f{;>@b~>*}=jE^~Pln3Ys(0RUjg7g8;(BFzq#evNpTsO+(i>jDw)o^vw`> zT~Le|#Ff)?t>WOB(7CW+;@0|_WY;yiK~;r-Y(30PmFl5R*5-|*mR@BOw|v1g#~%m` z&FXwn?bcdSG&l-L8F9`-|DiK_+)&RL zTh>W(nZ+W`4r~s?iE#iq5dVrhaoZklpB5{Yrt`OFttea*CnA@#K*s{t^$n>(ctf8D z84rnV04tEZKK=FELtjLtR$jUI@EU*K6ba%eggtOUZ$O{$9tTuz4nekaGIbAQro;d>d@f=7A(G?IJiJaGp*QW>tt{cXmp!AS$vh?Aj_Fce$PhM7zz=8$6fes5+SAFgm;XVnwwRFvf@W9nIaRM1iTVWH#} zBe&zX>C+K=ND+2EUSNG3CmM$XZb~wf>&F&j_O^Fd3J*|7Bt`D&w){ODhd`t}W?SBQ z^`MIXJ8MCMM5Qb|Y}NF#Q&2K@9V`EcCqgwdx$tx*BYN}-FRVH%@P>)afpKt@DwU!7 zZ03nEYe#R0_ne&w;?Faw#N*-+HpGZ=K5CL}co~(0f(XOpLg}{*qykqtRi<&h51XEv z;|?-{DC%cv>_j>t*SNu$)kj2tGorW#^8PY7h9+~W$F=&YM{)-~kGF1M|sTO1-#o0UMkIwj2p!ajF}V@nl{?V%x@gtU*ibn{9h7 zKwnfVkhSorc)pO179$K5Dsj%QrA!!w`Y$EffxAgtLy555^Zlb=t0fe(kr;KUk-_x%Qd08Inb_%fwOv2U9xF?aAX`QJO-b7qd;n97r2yHU*ah+OciHs&50fZ)QKpG>r5$U6Yfs^22^Ei5(Ry~u zX!Nemi`4&2WR&SrreXfWneyH-ZmwfPs|n(|G7K7pqkG+*MT+D_n7d~E(sLZ#cZP@p zo|ute^168tl|bc7#J_Y;3V=~M%uY1uHHf!bjP@RVG;AXxwvls6#X%=#g_DztR=7S8 zA;Jzs=5fTba};IV3T-RyG6R@gi%}o10Y>8?St*M`lHa?Yu?I= ziZTyLB8&zc=Y+6k-{2owQULNLezjL^4`@29Nc$mO9miJ{4bgo4Ggg=*-CsO9M_e}t zT96vr-r=hIcUfNRXQpNO9&Z27q(UzA>iL%t+SUxW6@e&pBdhp90omyq`^>J` zj0=wvY;qlgVV{#Sb-MBKbg{p1phCqZy<77yJkSVs{ zo{Vn#KCCUoM^)V31D0HPbN-DLBVkkiB^%7@cEejjel}(&SOe0i*Q*BCl)(@!_I9(w zV}wt`Yi;VHgd@-E4W&B*1wye=Bp1SRe>e5QC2K3qHbPK(c!BL!{7_sMXirNdJ+MP= zDMR22yrfLSz!Z9-&ZH5JO&~}TB?NNkGb|K%Kv z9rFGi3nE6~F3#=gz}h6QP>4wYgwVr*)}+CT9szY5-PVpSt(T$^_gpi`hX4U{p0H!PaXG_3_*!};gLI(?@SBTufDNu=1!=uBR~+R|@Vc|X+| zVmnacQ!4<-)hI7dk)Im#dQ#<6l(mDIb8-RmdIEEFlw%KZV_tK<#9@l=p>4JV 
z7kP>58fB1H$ryYx#TazwZF8u>Fs^}F<=f+Y0V5>J0V>dOkE-j*sh&cnEJZ{|oHcuV z_|I>=-{{KQb+`<9Alq=$r-*g#Ps%-I90y}*(52#JW50KkADv_g#Lgk`ez^;qpIuHx zxw{T16V559Bz1$PbaPc|k*0Mn;GvD;S43R%7|#ZpsH_E?;vu@mtVB84`Sxv%8Mw1M z1p(=n3RZuXR<;WQD~_&3L+*e+p_2dqu`A4=UQzpU$#gFnQ%=8VD`wd8OUZYqVA1Ko zTPk&v`9SVMZ=P0dm9+plO3jt@Z@x~@c2U)3Z)a&Esuo7bLhb8h-Vr-_*gY5uaaLV)hA0r0YH50Gn9GPzg#`^Q|U`*p! zU@xlo{W3UxZ3@%{EfD{(kk#DC7RYV%KpC-V3}n%WOjG|oqGGR+*=AT8RL&k`Ff^T6&oA_>FS2^l z^hgO_ggMtDBDL`%WjSKxCt3l!O4f$5E~{k1?qrRJ3d@ng(6sZ|1UA=PE67teRa-6Q zt^~9ZQW})@GNsA(htjy?;S^sCaZa2BJ~JOym(pv@_HE7o4Jfo!XjB3jmX93=6}f_; zf{|2S%p+U%l96Aed#P?VgaP?`z?nu>?hpryDI(>Q##Yzy5Zf6cEBJ;Ar|rZ@<1MKP zs?Z)WNXi1>)~c#%68=+KVtv4x+lOddlMZtV?leZHBUO^P!F5YAl0$U0K@GuBE7r_G zhtsU4tb?$6Jd>N5VfY7FY9|bKin@>_!cm+)bvTG0e^t8PCS`VFFrmrjx^h~6mw2YY zg=c){s;$S03G=G6s*E9HqQyc6TCGgg<0P>xL1g;)KoSs*6kx{LIO#A-AeDy{#j%g5kHvgdkkods!n> zl=Htpn*zc?)F^4fv?-+j)?A##HbOxl;VU`MF`UrQj$|b&3jUCgkdAchus3OJ;?8Ku z)c20DwTfp(8yE1U_8RTgr2}ZUSX^V~tBD7u z!Wa&oa+*~n9xcngFo$O*iM)f?GDad)E7Dcq74J2@A=&1syk^4-U@rsu_5umOBK(rEMEQgMz(mF-(b_&-|KAOeib zRnpLub*XBh$K{GKA^4x_Y><-rMd`e@RFu$Rx=7xcO;uZ z;<=JXqTjvRUyUkmG)-iKc_e2HWTPK|S~$o-6m*XhhCxC%2jtyYR&LhE&&BTQ7+fh` ztvL!f-?W>}6sOPu$@OxjF98$n+#8aqr%{vY08s9a$zDY;>|x#hjX{ny_HO{2(Q)68_-Be@-D z;G5u#G>rUMSM&&GaBGK57PnRJM1^4uIrWi%rJloCp~=|_ZH`L{U?77JnVnRi zi^&#G=ncsR$*{%unx>PA$L{LP9q}lkz_`iwG^2(1p1w-g3Wnj-Ot#)Yw+_Bw04Eb+;PUNk_&3;e?@BbruXw>W9v=hlHDu8UO^Q%tBRQ#R{>9VY z@2U#3=i#Q-g>vwd5LR+itT#dOYil7=mzaML#6%^W=M*Trw93UW`qNh8P*L2q??XO` zqA)qr!^3s(8F+fSbJ8%{*^6eh1Y{M4eKe(S?~^VbLBB)d*IPR)&;o_M{a$IX80Q#~ zjPZ)BE@^#0OpvZ2A!At@09zur&ton_C-<$y#Uj_nuJ#4IK=?AiWuXA%o!h%GC(K(oZ-Fz$g+=nZEqQw1G1^x7o&sBJ5?(Q?F-;{g zm3qF~;x7?2tESmhuvqlDa=0;Kz>mR6ED}yl2MwD`M@dWqzBZxCrz94sU7XW*gI8_? 
zbr0j@hXR*vF=HoZ7-Zc7s%}Hl?#AV@-u;X0ct^XOtD#eoLMVTNHf4hVh7F!rAmhkB ze5_2ek{*Ah+JZ>fK3RLG=a8xk-diem_I4u?#B*C0?T}QM|91rW z4MIL0sHbC z(CPdEQ$XEIDhbp}7|IHKLs$dOY{x*a_Vv>Qw=O~03udcWGXE20N@ARVy5@g8fx3_6 zKb?h5dvl}v_1Ci9(Uhy^E+rT_Tp90^{3wt@%Aj=(p0NkO)~uTJh2-$940g%#%)=Rw zBW&uF`t{RI-fp)TxR9)2hck@T90r5o(;b7Zm*7`YcKdM4-~9u7{QQp|q)9;i*U<#;?6XwSi{s zN&6gs6K&d;cxHS~&V{A%&X{zkn>p}nh-Rp*n?2d>H>dzof~D}Hgd~mGx1$x-wQ{Q< zKYLncw?Rv0p26S!sX_UHLM&UK+PM^U9)qk(Otm`@NhYSwxB-T`!?;j!e)UUyAdupO z>dMCHckC3ZS+sfznh4N#ZY7T?;`^Wgq`Z+zEQW7FU>KrCxf3|cE_8&f+UDO8Rkmk# zxZi*)0dD6!6P%^qe<$wjS;NrNAG==C`gj_*NHc4g>Ci5Nq@7caq4%`Hr-nc)^y0UF z*Z`a0_N(&9tW}qnRDF@9a?n` zXM*tVtiCu!LZCIltQK8!1}<9|chXQODUz%MR}9-0ypdjmBZ<2~95hl`852|N(l?YJ z5CX-fpYzzs!dTh^1`Bc!H(;xa-&->)Fzil0fTUW}9Iwc01j*)Lj|2Ba;|sxFlm3@_QF_Hzi(= z+zSfRj-lP{kVi_P!)RAEQn@BNao@b1GV4&~?RSjlBaScFnhsC3Oz}U66*RA7pVmWNYH|TS@;z40p?;^g43xCG4 zt*>!|F-XHvRk4+AjAFnsb&!DLkdD$Gn-J?b*C~7@m08Oe$xwxP8Gs0IY(T2NcWKYm zv*=3aK|XRd(eX@a0~JOR;*z~1?*TDrhB(}Awq>$IDeMsxM@3Rj&=XNS9j9Jcy6TmB z+;QS_%5hG`*FpUzq+un&d-Kf5^kZIYi549Ob zF!_5wVz={34}5iCeZt|bTO-3Uc#XM6gouy~XH2v#5EQJHZ>7+x=An0+m45H0bWw3( z`k>Aw8QX{CNRX{=re%kfSCH;hKxqVjKcCd2_rfE9G}45AjY>k4*?>48-vH_@H|9St}$MDhY2DLL8k90UgdV{Ix?r-^8z(i6MtyoGz zB`%=h`Orid`(ct0Z4RKE9$fYhF+04MjYh&IhS3{`PSW$hNR@%}LZbmU-TgE`1!ZmR z=qJUY<88~#rEJ(_as6I?q&%QZ2v6lXWE>zau#rfPfy7&7ZGVl8#)``WjWc+!XPxW? 
zF4s}wAqOG{hCj7Af#eaOWddy!YnhbwlPk@t?q?$zKmXB&i>@jgOxg|K^N0|HD%*d8 zOn%ga9nieB6{BR)s@$)ZFQDdBpo7E-6Tfspm*)S$Ge*ZYnSj$}&){4&&c3(E;zoxZ zyM%R>(%~humAbm~U=UQnjkdwp9bhMm?*>#=o!^xqFdT8!d(MCf9&3c#3aqBtuC|nw znB}wu$LXG3sO}OQLxvKv^_rk0T2givH{rhU-RokvP;@FP{CPdL16sOUR>H=N&~G^~ zkcQ$zR_^j9Oh0*nuxE;c(}~~K4XvQ=QMB;GY0-eLZr?Q<-Vn))2k6f5OkvxUmk{dW zw~$pW*4GdtKk@D!xX}QIEh`aOm9SjxRDFFEoMjr;3Tqkz_s*A!0DlVJ9};$;3@C_U z^0nrmjLod&F56HTM>{8T$S$}vGG?QkOGQ~IE{#CeDZjbO1A+9`A$>_HWvHbX)T~^& zy`%PTOAcjcFqAiNk1|?7n9m_`E~f^w8-kAoL8PXjI-6N>*$f6fnX*Lh2Y}0~pO@^o zI36nS9OV0zVv$ut3!X@LU93;e46LG<+ZtCHNq4Ia9+TIQXg$}C&K1lz2?xAVM2NqF z7JIxj71Lw{MVdPfK-)Q#X2$mV!pxI7EE0BtazYcrJzXdcH%QDRHGGb@SA$D_z_Myqj7F}SeVhOq>w9w&utm<_*LpAYU4^4!D)ctOSs?))Vqp* zL)H5RF-EhV>>JRH@D35!_fa)hKkZEigE>eFyp4Dnl+TmRuIf&N@<+-tGLWqw&$53YhGezFAMDI>qgbR+sWl-WybwWNN zab59f)EVH0t$}?1GTcb_zPr3EM~-Ea$P8c6Q&0URfX7cL#~ZN;Hx(L_9bwqlSroYe zRxttL$eJ88fiY=o%{>=Us-SBAEqmh9xWCleE zHl`QMdLJaYasI?GP9DMZfLMOQRYoeHH|%$F#@fAB-?4Sj+z8GQGi--9aLNc1tB!}0 zd=g8Yd+PC_vt6AGO;~GGC>f@mB<8~9(;WF7-Wd$9ZDYLe>jOGvdklFodfi}c5wj_^ z6DA1OhLKHti_D-MLU)+{7oaur(AD@u4$Km9X{FzB^K6%d@Asq)%1ID?C)2x}sY`JgO=0X1 zb!cY0__pp0g5u#|g)!u!LW*_6kZ>yRlw|`tL1#(V9dGCcJ^ogLyK@B(2g4AP1P6if zkB!CB6%PT->NTfLMo8>BOGUsD&@i?W^f5vFEAGVSJn;J~7LF2R}!;tS)H)IW#-dVvZ%V5#+Kxc(i!I=;PRyOma8riwy9Cx;kjuJ`scSdOz z7Nl`O$d#lSjIJ5Ucetb#P$^H4gER)_+-+UP=`?DVH<6r4`0rrVrWa`b#)<}RGQ_3` z@wy%!MP$l1XwGuK(+{yd{7vrHBB5|ygj_?H6H+t z(%7!ru4z~hg>JoqzjeDQ#eu;a3>XfNz%MDQWsTbV^ghUOM42ki=rFzw5++DtqF1*& z#E!JO26jKyc~EOQxn4V%!s!(!2$mv4t4KIq472O9T*Yr#NVI)yB_JzsWMt^#iaw* zuUb{Z^k$Q5Q6qOrgIN+eCCtzhVYAgUc|Vcjkt2Tjt1Uu!AeDnh!lx84l(9(!8|+AY zgj>9D5_^KITrpCMH??bln`$^9nt!Dy=N8?h&&aC~IB_v4*r7XoR2V4^HEtMxMrd=; z#0dSi+}8X_g1YdGFypEFm&d1vx;&w2CS4Gv+W1N|fi7WWn;bQyoNI+ue09W;%RA2%TR-iUPQDPpQ8krM|j?x=XVsVCvcHjFlV%=VN52gEWpG!_pWS1CY7*~7?MmMJ#_s(*m#SYbSq2=J5$$DY5B zrdAo_oH?u^H4fRbswPp-%TqY9x^xD()(Qg**Fw8)P=25VJ|G4V=7+jV?%#R@;RlYK zyH^+mRGC>we-F2fXWHE_pIP_^;E#v%J?~AjIhLG}@r{89bZt2#Y^{^MEYv=;lMYX5 
z19MIC6T^N&)eR33ZO~!bpvps#2NcZIrnFt4C2}=%#IsG(cY_1{ha}r;!3!Ftk7~1qn;ZEKRqAvv{OvUY zy>Gyw76cDN(Q$K(I#OCbb_IMOc$qh}E{>jHDqL+)ThOvh_SQVN?`d0 zvyy*${&iW=ZsvJ&7BK9t@{|!lmFZjQ1B|JhM^a3ZJbpy=QFyV=RKT+IGSpA(@s&dy zjSZ&EaNj-hvF96xE)#@3rtDPmlYt13A?)JU@*y^ngIx|B=SR*LB=t^)x z_5^PaxPS#?z5XIv-N9$zYRWvs+X|f_TYE$o!UnXRenwU@7EU z-o9KeG15uk5vqlJ*$j`t=&mh&A7E%cG}r(cZh3d;nPr>6nNq@xOiSKf^Q?@EVk=Sv zi0f*JYKU3w3R?~WynVxwUQXr;2G;{Yz(`@BjM3x1t@$4S)sc>XcJktV-PqHhOV38u zUdeRz;xcI2Ak_|3LupeJd>{pdfIn-vXZ3ED_{B&Vypwpg;WfKXsk4O)Y_vG9J(v~hG5eA#S52~pmBIC4Wq;9;>MqkCM z%SjsxkCMZPqOr<+OEo_Z#sfX4#$qOew!`f-x=fEucwi z?iw7S+%*0sWM*`p_hzh!ZMQXS~K&SP37+#~Te?gtYwo*60+-QLsGiVf_1V+iWH)?5(|!OR|@=M)4|aH9P7 zS9Q^SmR=eru+CDJDkjT7&gl|Z%FhG=h6GpiyMNfVC*T*~Ldl12yO)vn+&G?GoCp?l zr>0=pj(ldwRR?e`MXr9?=48`mKfh;d@@gkq0mxz6)MhSU<8u1Z& zqpmraN)ph<))AIbbHwI+I!+&805@=0D_Ut{^b@Mo^a<$x1`%UpB5P2hfPt(``O-)tLC@jX+J)_HtR)&$I6jt`e;WTu zQO$-1dDV=3#CA?PZ@Pflfp3D^z4({Z0ae$hK@`_t^xDv_v6u`ThG7)0R4~$Ota}7N zUjr;AiIwOK8P2DgLvu$%Bk=Z~%)mik#bUt4o7Ym$Q^Z0T5M|c|s2)08T9ZQ3axqq6 zS7`>bL8ig$FSZUyVDL4r(S~1SfY>y7cz(M3=l#!WML@+vOI@fTZ5oBdY3q4-Q}zrQ zrQ}x^BSJ>z8bb}FijC?}Otjrq8Iz)kiev?;6rU{%|&S*{|Lhw8U&FQ)VX}lx01F&9hslCe0WJeV)p}CrKaNfXu zeIQLVDwkbs#wu-m|LrA(l>8J?jZO}N$ov|+{mTDqG%;kC2LuOU1k0a!IwD1-+1DAZ zcmoOajk<}qTNh2z(({tuQdmvdV=$=a0OCKT3X2p3!k@4vbpy>z00mJqxjWK{y%3y9 zM~Nb)FwNHTEb@mtqsJSf^I$MS`FSY8JxqQRNrZlwn@{?z4A+(6v=U#I+D;o+u;kmT z7$-}`qM?;==3+Ml11RIbYH&nFjfw7f4^2&WI}~_XoNHQF_u_jP=L!r&grjiE_Pjqo zB9PzXRcjxnilf0**D6MvAkOf2FM||A*8{m9Fv|9%;-xJHZ39h_kulPnTg&0Sj%%OC zV}rfUA-ID1Z{Y*==)+tdi0dt7_-1+=**O!wwb(&V0rujG-eNd{BXD%ypP4IC_vQSB z6+BSpxlEK)Vk2_jBcmlhDv;Q5XMtn4 z6*6xCJRsw2#Ld{3ArRtAM37?S3zie{skVsi06w)QAr2x)w@yq-BcrtTpo$E{N1@70vI*FIv+KUT~-lPk-DL6vn3cd9^=nYunbb7kyg4DTiyAk((-XS zKG=xsujQ!HNoDLQtqH6I?u8XCVFrLQ*M-cZ2ELj#>GwfNX${!BTEwkMIV<+RJ{a!5IZV<#f0c8^Za57GJG6t>hBrYp%i3S6Ve+^;- zX-*!^pMI0aBNS(n86{25Q${bW@-R(0@$@#~1NG$_O!CuD=u=yw>JynnT(Vne`&=TA z%J61coPBOg3lra8W_P~K%Pvqu#iU$JI5J>3INU&%`@ngn5!lxMehck8qAu{R-C4CMwYwJT>0G`+mSKY8?bWwJ5 
z?i!`Zn`^&eNH@IwPTkPnExEmDj>ss|e`n zjNNmieUMaj)9qn-3hoR&-Q&qTB9EMj@y0n1H6}T&ygVOB+1S!_g-uiP!E+!w%InhB zgE+Z?W8H;f&=hTUU6LO$)W)e9mAa1P5FN(Z<()M2*J;hlVd+-4v`r{0EhMZk%V<^c zFD6+#)l%r+f1}7U7Xy07CP!m^SuV*;I)E3<$HS-OeFejx+MHzA-&+`+w8WHbrl;D- z9Hz2yjjbILPf%&Q#-Sho@g(Puyg|@pNiLSn7?&DNp3K$~-iQMB&7=d5o^D_n8|6-7 zJOZddCBS9V*;rLced|19`#z*)ofjhLj?DCb7qGi>IIlCtaUdmPryc*!pt@ndW@(LC z^(=L~W!bgi9icYf5Mp{^$>Vl;6_lV;!rqMV)~&901VK9jF7MC|idC`78!Da?$IR#( z);@f}23eHTal^k`&M1IOscudyiH#CP&+Kuwl`LnD$b3+DW(xB|&a9r6di6iHyigK9 z4{92Qx8GMx?x@=6r-%8d?${Ez0)^GB?}7l?m?xk#weJazh!cYNVDfq||{^7a4qj&b-x-t&da~&GZQ{J7es6vahG>UMkhpV;`8Kj_b>Xy8;Xa;F0L7wmMfU) z_kv~n1brP3NZpieUDJY80G~#++(AOCvbBrPZk{QwJoM)E&<%cQNm10B7Se|v`z#uk z4r7juR6|F}f*@r)2TiZUiQ73PZ-Z*yb(|9Z!FP!8x(f9BGZejs z!(v3kKR=ZLG654FrcZazF?pHp1p774*3r+3FrKSX#s7e{wcjrbV0kzq-QAvxls zGn*#wdv?Xc@sTKzs5bOi??H)3i>j$Ixn8x@HSW-DSgU*Kow5Rb1lG+)W2nk$35!-( zg|5LkuzffNCA@x7Ke~^&GrfR&kMmRfQg3_5yKq_^QE@%3Y2>a>s&nC}OARmIkb#a% zxN)c3aBK1B$rvpY7>o{WwMC@aZ(FeE7*TL6>AP|3XW7YRkN=8itMwxH*zrth1!R97 z6(-$Ef&AfJUxjE0(*lKNgPw#U;z(NrZ6B|(Pr(~ph?BG}QN->H6X-t&acs|^Q6;4D2z#1tw`m-lu~?z&B;agPvb&$}?tUMLJW14` zgN})tNkWo+oM7N>jf%E`@D!_E?4d5Bb04iP$-|;KCCgUDF5vDO)xb&PD)@-RdHrO^ z`;&_QTM5}}47ZhXr7!mc+;B%II zW9cfWi9aVUkm`rMhvAgl;@{<4=WeletWjXz4T;VExTHl0+QE| zmZj?WI1rw;4036;q?}6n*tpt=2oL4V;Y-{z!T|3i2#AfQUKj$qz?nRP+u9W~87LUmVn3hK5BlUNFXu-7*Su-vhXtu4^$%DOcbq zW0I-T+ykqyxMpb%4I2jMoh+1$Z%XMl+oSRTPC^WV10OQOqSxT!yA^VU;& zrN|((^tfDG>D`oRnD$i`kfUDsT>g5Rj)%ur_Ek#Z7YS0ekjt)DyeR$4Cx059t zfY0U+yY8?@rgP#$yDxKn4Cgi`k5Gq4cEs$d^E9kdTiOp(q?I0x zDdPx(&(P~&&9boF>I5-qcl@F1dXKJgEex|E`$6fTrmQbeG67_LxZU|gX|4#cv()9@P_^C?YD>hl5#e4D%Eh0x!yxm9Z*KLB=a90 zNsW{Yypg-S$~cxhN2FobOxl5gnZOPw`Dv-p5uF-b;cA205||SEgrL+MU$cq36>J6h zCp#)mbl|4{g?_h7#egUM66ItEU0S~PZ>5nz=Yb4-%RFb@c2gJicQ1LK_EN(HZR(j1<#T2$UjvPN+3ZH9MALCl8 zQJxDA1h0IGQua06C=6Ri%Y#x1*u@H?JIuu-xxa1C?k? 
zaWhlJw?Pui$&z%x35q(uv~!6O(xAoBbL!_NBR*Y1o$)ZanzI(I;uo?tD*4AH1vq3h z^y$WGjV|M{uc>6a7k6PdXfx3zccpT%6Y6H@fEWqa6zEFVo+JM>ylu70)9)@sS>l;t zGr~%9M;GT;zphUQl!0PS20zH&v{LqI-QuA=UhlO^<7r&a^w^$od^|rzL<&bk z>MhE8)s1S~8Abk{UxsM@%l4iD>KRjGVHC#C1Qms0-Y%x zoBVLxARdu@LyvQ8A=!Oc!=|ZuZND{YU1W*c5Qnk?rn|#3h%3_z*-p4Mo@^1%vVYk74sjSHQ-Qeh_j4W+kSzIFhWkyH+r-75j~y zS+YH)#V1i0&+7nc{+7-xa`jD99OG5QGnB3O@YE?h6wAU^8L4Z^_NdD1x&UXd|9^GY z*5kH~h5u5nP4h@g9E-Iiy1LkP-Uh+69W0t@)1*NE{gM(%k<_6`+YqS&`%o-`R^|zL zZr=sOWl3idSdx4wfhWK7cn(RPCf%Xu|Movo24}S4u56 zy}eh~H1;co(TOzd2*O)?AT%@Bi4^!3z5co`(H^5DxLXO-Fpm;}9ci%5?l)cFji9Uv+=DmKp{!Kp<*Y_-Nc}{s$e~lMD10UGe+1Nax-9f9ul%6CFw5QGh<=$C91C_;RrF+k8fJJ zJmCKbzfyt5GI64CVsSX5rkV#}StD3%gvYd@KT)!YkIN~wP5Iz4%MEnvb&h@FQH|Ke zrS3yZ(&huTR7wdJ>F7In*GG5mNY9sFk@y-Lq0gUHNt_ z4c$V(>WdA{ny=a)&Dzfm5ts)h~T+DOR0;VI;kqEg)EzJuOyC>G{fo*hUX}2%8)} zQ15ji3e02>S58w4LrofhHnT*vMStd)-rS66X~Se6be zHOiwQeMZ=E>YsZ8k@%{=Y)TSvX0dEf%pKHU^Bnt7 z#Y_GKwsvzv{GqCJBGK@Hbua6vd- z)A($%BhWnF8E%j}4b~oPUUhXP0Fi{3^fb+lXLgGM=|T=co{cCS%kbBy$YkhAQnbl= z?&Y@c@Z<|5P;<+D3TnwmO%rCnS~C9g@se>i?yO|1siXfo}}RT@CX z7uAz&8++zV@O9c2fWu+?rj@De2Az5GeiS4CXHI(j5mWC%f}40i zKP9SX_o=232>bcqM+$tEQH+S{c=UYXDQ5{KaqX@`-oX|CGDRIPL)NraN4-~ynn*WZ0ccVP)JYk+d1_J&V zIMWZ*2<}}w#^QHso-Sg3X)R7mLWo5m$Sy>2*FNx0FjgWhsIK8MDe$_|(4s?#&l{HU z+)MoW?l{BbCfl3f&O})P_bMLyN3f6D9GO1@W&fzWzBgflg<49kqP^272sZ}l_O=`I8Yq(NrBfs`cjvI@r5Vr=5|W$q|pI#{GbJo zc%mJ=$bq%bz(9t$k{Yqr=jJq}m-fs51z%>bd3*65z4R91C zU7v?n-KcQ{t61jqLT_W`L%($8u8Go+0Um9@LYjGj@WwAEJS8=G?H3&XkJWjr%{Ij zJ&iJ?xllTwUsM-$UugPUZx}f!^v4>>-K1uF0cYjdS1)>>PKF}3;8Hbei6H5GyjFyj zZna?g(N`hXT^KWSrwR*biT8zhU+ZYXmHT9R;_A5@SR2>7)J+mx#6$)&tGxV`>h{BO zYLYVR>8#S%U;8ZE8VRAgzevTqg2pq#7)Nt^?us>8U)QPEA%tTu+S6;J80rbFmJ;0d zf$&=oT54GDPL*Ke7%QEk^R7?82}ghgThHOB>POO(W`fimW`#T6g{sRdh*mKkMszC9 z-8y@O!Q4SNi(U5;h_?CZ!s}O4rUPbv0LX9m1M3f z{m**aZ7IeZ?VaiP(Sza%JX#O6WmFV}Em1Y|W4rdq03?}!O} zI4iyv@VUJ??wwF0k=|Dk7+@1k&9$?88-q_W%cmFWx23v9xz((k?&u!rEo*IYL|d|i 
zNLNaOfI6~3>N(-s)$MU}Y$r!I=(H_YBlL(DpL|lLINb`#jX9#5MYfzOec%&n4Q-kZ6=+|ii{aKo~lK4pjee;RM6L% zAg4dM*#eU;>hPqu*3Y8)+q;6bi;$X78NEG#*>#X1<~LAb-E;X8t- zuS0r5!%^G)g0QZX%_N;lkLg)MszKa-AjIk;cjq<1 zDjD|`2bRSX&1#LR{pv@kZS{1&+r?0PY*pR%VFZtNkD-3ld-EaXa}gh$xc#H`M02YY zN{2=(@b~rSQP*lD_fwdQQ22DPK}jAsT%8-zMHN?!;bTpDBrdR@k+ZfQwQ+Cmey`3~UowO?3^xsURv{Jjq;Thn;CKabYNbLr z+9xOr@Rk+w-(!vWT8s9v7Ymp}l2UPx`HRwLbma8CH=t|FX^^fpgQdwY?egY~zTCBp zb`BV4GwNuC_eezW_Vh~48sc2=HFw|`RV2PYf(l6Du;VYGXmJ>{h?yS%?|U}edUJAf zYIlEneD*=@X*5|7;K>tuTB@?s=NVAJ`5c5o)j3Xib@f@RQ@k~wgePDJ06U6$Lbu8=SsN~DaLeAbnC|y8e z#Zf6?}EVr_O=DX>$J)4$IRhIWmk#qI=uMDLjp9sqMm#7{2c-(9n zQX-2LnZIiR?{ClU`nki@&uux%r)a|AxMAByd9}k)nTvYp)Po-#Ro+nmgKvJxyS*fS z&iZ`w@>)_-g@8jv?t#0fn2`&OViSnSvw3J6n-4E;)2xbiay9 zwiyLuQX%IcI*EBr?{&!2ea)E7%*4mm@xY_(Px3tUH_0Rwn$f#Ax{`R3v+HzpiySHw zSjpk6#u7-{r%Z=7 zd^L_Stj*!B9oXJ0|2Cg(N*$)pG#3!gtc!=4!^Uk_W~-{U0CzFgG?V*GQO0RV7ARIJ&wwzjEDZxYEg+k9Y9+ex%O6#w7~`5%YzuZk9|p-yi1vc^2V%H*}caRjQ{ zcJqzHnt-`!$!mE;LXez(Kt#d|_wN2uV_j(!&aD`q?^N%RkKeZCv`Te-n}2%4TDT>* zkcB@}P~fBS-TEE)A3};aQBL~7_lA=k0<)^XJ|phKp2Ll0f;Mq%{k2N&8kKB4*N#uaWR=4A^sy6;$6eXeAEDU3 z1*6w-jy_wx$Xkf}3_nlk03%6F5Y@7U*nCBV!_GIhfS&Riv%CRaueBF*KxiGD9%m8BYjwZL>t?Me zHnlQio>(gj1&TR}3Q23$rfVWfx|sTX*Cn?U?7V9GqIxyUsmC?*LRE!_AETEeEQ}!N z!(;=7eo1-~%>ahwFn9r%fSxz`UPu{F1MKi;C>i!xF3O!i9jnn7*)lBHN?5$h1+-y| zh#PTk*&n!)+p5tClGU2{bi5BV7W64Cv1-_YO9qxv(Qv!hIe{Y8vr}U<2_w~9hoFqy z^=et-Q3Nig9cB_FxVR~oJJ9|-{>QJ!LimvtY%7H?{E!lIQQ@L{TDcnqRw}y`x>56j z;K>LT<;N$GJzP^4OWE6)QL|IunVXR{64r{p8NEIU&WPE!Qpl*}UBkt{;>XqGuEMay zEaJ@6WtD0lW%?08wqj+XH6C%Fk8x+gTps=9C5&l?tftgLl9o!fek}rAj-YQOcbb8e zVOP*4!zbi5*I4kj8{*Qg;O(}$2Sn@`wK*0FJR;Q6khM*eRBhR2y;iD1qr>{R(ZQ>N)Hzj~ zLPF3&g*WmvxKI5O%xqgK#c@6FDWLu z>wb4aSN0AA1|ki!tbuiA{Z?Z-^$2=hFp9b5#W1y`wAf&4C(Ho_`Ha@>z$Ckn&Q-2^ zVQYWx=j9XN&Ki?IeVZtUM^Qm5xxG%Cv>Ae0qjw&C3~)IKA4CrR9+?ei*%pfuqsvXY zr3@sLx1IfIa`sz}`H3q!XMOS~^qUJ3mz-AWg!kN8#4hMt!`T$}GS`UlF^Q7?rOi=Y zoI;t5ykDF3_-e+H^RSNJm6t>4i0LmMR$#w|3UvrR)=tF5P7kP38&BP^?m*Q3@Iglr 
zKISa+jL~k9Z+EChGBQ0zWFgk7uky{u^q#04;-Zv~##>Np~fMIi+e$L&a*63t*oH1S%S^xzRMVqGAOaU;n@a*C& zw0f?PyTRLm2o(TcjWs`k$2lw(rtBFa!o|)uoJr8&61^n@7x>K=Nh7EDmnliB)70Er z*mCJ4BaIyvKp}vNa^Lu6O;`3JVt`7&W#n>{jz1Rfu!EXYkYv6K@+e=$sH&&>x58eO`%YCSsL}9<;_IP_9gm(1ev0BOk}uT&FA|j|U_noel(i42iFlPr=a(s*x0F zkfAoVwk8(E!P-|PKelBC zF;&Z^Gm!br$lUzJSJ68Or}qk9VHztg2ogWVr@K0A+OjU92SHVy(9qd!+**DT?~GhB z#6*QkkX5!f6Sq?myv7ZUSTqStBsT~@e_u!gIyFnt4kUnlE`$AEc|}Zf=A*8ZDqnCJ zPYKS5%05kiTEaog1oU-g{CItIqr4mE{6H7o4?O6X@+TVgBxH*9_y;l>XV9tdzoe19 zP=ki>EGEg$e_y36@EwyilMZKr>NSV0+q#JLYAf!sI(sRPyh*%~VhNv*ZIIhFqAz$U zW+K(CxIo7`PWyA{DUjSiRLZo`ta4~PY0<0=0z7Z>2=9%?Fd~C6F;8a}1zPGJtyi)7 z#IbG$&HP0fpQNA|y@0Z^dx;lJ`_qnLa!-gHPib%&NWOyy#@*-0V(7H zB>Q4mT1F9z-tTfMh4n5?`(F;Yk`Cu7L(In^qz+tZWNw|jva^D@Vk;%{)a)_ollr3^ zAy7GE-rI7-SAaHkD@QZbde*D+wvyN5R&(Y_M--YjwjDJf!UZF+fid>%n5KBQLiE@_ z1WoSYl;z>z*RCM-Z4&_ia{^d^vOE+tBH-Z$;i;(f#~*+GA_JfU3=9-R#g$%YvAi;} zbF?)xFm|ywwKej02Se#9)bOWqhl z`iB!w<=>d^+q@I}xO~+x+N8P=%BFOdpEh2`oJc!ur9Dbmrdrfg{$+=8*nhtIiP8Lv ze7aX2D!9kByGD{-4U7x>UZJdd(;r&4q&fRoEaQd;314mcI`TGbTMFZk9?^puYU?SH ztz`f@Noi!C=8?`(4cczdiM4}{t}RU?a|gpR(@1L-;EW3BbJZp#%AuLH7Sksk=l#d` zf+?)G?_>B3vv*ie)~*bkHHx_C4Okn_qb58kTwVmReER(Qk`tpCJDV$Vm|)FaWu+;6 z-F8>bVi(bwI-$ApKr7}{H}Qu>X19+6?R%PFQ)u=8;AJG{bpl%%k1W>mdLZB8TqkZ> zjk3sE&8WLadf~3B>EuERaL_%zpqarnfj z>6ag$yvfzg6kZy)(?z9;8x_js*}_@`F4DT`ENi})vDeF(w3%*ovu52)f99G=tvf$G zVi*tv_e>vOO8gn2s&p`JeQNLaA?vZFskVx+6A6Z+qA$cl6~wZ_N~k~r0QOLTM~Q0@ zS*7^63Mjyd`=68F#KFbhnT?g%+1>eH`FhOwRrjF|I6&;{?5pgX>?`aW>}%{>HK3Yx zNbpJE;6ECnYT$zT*x=)@(i4|^`b+8I{q3Z7X3j>YM$Sh6F1e9Q*WXG`;x`D@KSNB- z%#B=ZotYn~|I$ACConQ*L#~3pkL9UKl{$STBeU!<%pb~)Sn^XJRAc_4{CCqJ|K~dY z+YtWl8ov*L`Ts#T<8*s#hU${IkGAlv7U^~W?NFF}^rfv0Hvk7d1kI=_Rbb&_RJ;@_T~@p67Z}R|2DI~XU(r-0nopT z{VDySG5_c6d1%XzyFb}4VkQ2O_oy?&|GP8)w-k5`z7<7DeUwGNPVeuEe@}%6#gCo( z=`V_b{~P?vWySB}f2P}m=RY=PieJuwmGMur{7VP;iGJt+kLTs@&i=UU!~ceUWRgE4 z_~7$Dfj_lASma|X{x#i({~7qJOa4^*;Q5c${@u<0QhO^ZmidoS{G06G!}uWku{ARP hGR1NK5AyFm|9Av2u;AhY_X>1?1q=XSmG$uG{{UmWQI!Ay literal 0 HcmV?d00001 From 
ef61674597a8410be7b653027a09324f5a1ec0ba Mon Sep 17 00:00:00 2001 From: Li Wu Date: Sat, 19 Oct 2019 06:10:02 +0800 Subject: [PATCH 23/53] Revert oom fix change (#320) --- .../eventgen_server_api.py | 2 +- splunk_eventgen/eventgen_core.py | 102 +++++++++++++----- splunk_eventgen/lib/eventgentimer.py | 18 +--- 3 files changed, 79 insertions(+), 43 deletions(-) diff --git a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py index 37c68f3e..a8400af1 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py @@ -406,7 +406,7 @@ def stop(self, force_stop=False): response = {} if self.eventgen.eventgen_core_object.check_running(): try: - self.eventgen.eventgen_core_object.stop() + self.eventgen.eventgen_core_object.stop(force_stop=force_stop) except: pass response['message'] = "Eventgen is stopped." diff --git a/splunk_eventgen/eventgen_core.py b/splunk_eventgen/eventgen_core.py index b5c8cf6d..dc8a0355 100644 --- a/splunk_eventgen/eventgen_core.py +++ b/splunk_eventgen/eventgen_core.py @@ -6,6 +6,7 @@ import os import sys import time +import signal from Queue import Empty, Queue from threading import Thread import multiprocessing @@ -140,7 +141,7 @@ def _load_custom_plugins(self, PluginNotLoadedException): # APPPERF-263: be greedy when scanning plugin dir (eat all the pys) self._initializePlugins(plugindir, pluginsdict, plugintype) - def _setup_pools(self, generator_worker_count=20): + def _setup_pools(self, generator_worker_count): ''' This method is an internal method called on init to generate pools needed for processing. 
@@ -150,8 +151,7 @@ def _setup_pools(self, generator_worker_count=20): self._create_generator_pool() self._create_timer_threadpool() self._create_output_threadpool() - if self.args.multiprocess: - self.pool = multiprocessing.Pool(generator_worker_count, maxtasksperchild=1000000) + self._create_generator_workers(generator_worker_count) def _create_timer_threadpool(self, threadcount=100): ''' @@ -163,13 +163,11 @@ def _create_timer_threadpool(self, threadcount=100): ''' self.sampleQueue = Queue(maxsize=0) num_threads = threadcount - self.timer_thread_pool = [] for i in range(num_threads): worker = Thread(target=self._worker_do_work, args=( self.sampleQueue, self.loggingQueue, ), name="TimeThread{0}".format(i)) - self.timer_thread_pool.append(worker) worker.setDaemon(True) worker.start() @@ -188,13 +186,11 @@ def _create_output_threadpool(self, threadcount=1): else: self.outputQueue = Queue(maxsize=500) num_threads = threadcount - self.output_thread_pool = [] for i in range(num_threads): worker = Thread(target=self._worker_do_work, args=( self.outputQueue, self.loggingQueue, ), name="OutputThread{0}".format(i)) - self.output_thread_pool.append(worker) worker.setDaemon(True) worker.start() @@ -238,6 +234,22 @@ def _create_generator_pool(self, workercount=20): worker.setDaemon(True) worker.start() + def _create_generator_workers(self, workercount=20): + if self.args.multiprocess: + import multiprocessing + self.workerPool = [] + for worker in range(workercount): + # builds a list of tuples to use the map function + process = multiprocessing.Process(target=self._proc_worker_do_work, args=( + self.workerQueue, + self.loggingQueue, + self.genconfig, + )) + self.workerPool.append(process) + process.start() + else: + pass + def _setup_loggers(self, args=None): self.logger = logger self.loggingQueue = None @@ -281,6 +293,37 @@ def _generator_do_work(self, work_queue, logging_queue, output_counter=None): self.logger.exception(str(e)) raise e + @staticmethod + def 
_proc_worker_do_work(work_queue, logging_queue, config): + genconfig = config + stopping = genconfig['stopping'] + root = logging.getLogger() + root.setLevel(logging.DEBUG) + if logging_queue is not None: + # TODO https://github.com/splunk/eventgen/issues/217 + qh = logutils.queue.QueueHandler(logging_queue) + root.addHandler(qh) + else: + root.addHandler(logging.StreamHandler()) + while not stopping: + try: + root.info("Checking for work") + item = work_queue.get(timeout=10) + item.logger = root + item._out.updateConfig(item.config) + item.run() + work_queue.task_done() + stopping = genconfig['stopping'] + item.logger.debug("Current Worker Stopping: {0}".format(stopping)) + except Empty: + stopping = genconfig['stopping'] + except Exception as e: + root.exception(e) + raise e + else: + root.info("Stopping Process") + sys.exit(0) + def logger_thread(self, loggingQueue): while not self.stopping: try: @@ -383,12 +426,8 @@ def start(self, join_after_start=True): self.logger.info("Creating timer object for sample '%s' in app '%s'" % (s.name, s.app)) # This is where the timer is finally sent to a queue to be processed. Needs to move to this object. 
try: - if self.args.multiprocess: - t = Timer(1.0, sample=s, config=self.config, genqueue=self.workerQueue, - outputqueue=self.outputQueue, loggingqueue=self.loggingQueue, pool=self.pool) - else: - t = Timer(1.0, sample=s, config=self.config, genqueue=self.workerQueue, - outputqueue=self.outputQueue, loggingqueue=self.loggingQueue) + t = Timer(1.0, sample=s, config=self.config, genqueue=self.workerQueue, + outputqueue=self.outputQueue, loggingqueue=self.loggingQueue) except PluginNotLoaded as pnl: self._load_custom_plugins(pnl) t = Timer(1.0, sample=s, config=self.config, genqueue=self.workerQueue, @@ -421,12 +460,6 @@ def stop(self, force_stop=False): self.stopping = True self.force_stop = force_stop - # join timer thread and output thread - for output_thread in self.output_thread_pool: - output_thread.join() - for timer_thread in self.timer_thread_pool: - timer_thread.join() - self.logger.info("All timers exited, joining generation queue until it's empty.") if force_stop: self.logger.info("Forcibly stopping Eventgen: Deleting workerQueue.") @@ -439,8 +472,18 @@ def stop(self, force_stop=False): self.kill_processes() else: self.genconfig["stopping"] = True - self.pool.close() - self.pool.join() + for worker in self.workerPool: + count = 0 + # We wait for a minute until terminating the worker + while worker.exitcode is None and count != 20: + if count == 30: + self.logger.info("Terminating worker {0}".format(worker._name)) + worker.terminate() + count = 0 + break + self.logger.info("Worker {0} still working, waiting for it to finish.".format(worker._name)) + time.sleep(2) + count += 1 self.logger.info("All generators working/exited, joining output queue until it's empty.") if not self.args.multiprocess and not force_stop: @@ -492,9 +535,14 @@ def check_done(self): return self.sampleQueue.empty() and self.sampleQueue.unfinished_tasks <= 0 and self.workerQueue.empty() def kill_processes(self): - if self.args.multiprocess and hasattr(self, "pool"): - self.pool.close() 
- self.pool.terminate() - self.pool.join() - del self.outputQueue - self.manager.shutdown() + try: + if self.args.multiprocess: + for worker in self.workerPool: + try: + os.kill(int(worker.pid), signal.SIGKILL) + except: + continue + del self.outputQueue + self.manager.shutdown() + except: + pass diff --git a/splunk_eventgen/lib/eventgentimer.py b/splunk_eventgen/lib/eventgentimer.py index b802d9c9..5e3c8405 100644 --- a/splunk_eventgen/lib/eventgentimer.py +++ b/splunk_eventgen/lib/eventgentimer.py @@ -26,7 +26,7 @@ class Timer(object): countdown = None # Added by CS 5/7/12 to emulate threading.Timer - def __init__(self, time, sample=None, config=None, genqueue=None, outputqueue=None, loggingqueue=None, pool=None): + def __init__(self, time, sample=None, config=None, genqueue=None, outputqueue=None, loggingqueue=None): # Logger already setup by config, just get an instance # setup default options self.profiler = config.profiler @@ -36,7 +36,6 @@ def __init__(self, time, sample=None, config=None, genqueue=None, outputqueue=No self.endts = getattr(self.sample, "endts", None) self.generatorQueue = genqueue self.outputQueue = outputqueue - self.pool = pool self.time = time self.stopping = False self.countdown = 0 @@ -142,10 +141,7 @@ def real_run(self): genPlugin.updateConfig(config=self.config, outqueue=self.outputQueue) genPlugin.updateCounts(count=count, start_time=et, end_time=lt) try: - if self.pool is not None: - self.pool.apply_async(run_task, args=(genPlugin,)) - else: - self.generatorQueue.put(genPlugin, True, 3) + self.generatorQueue.put(genPlugin, True, 3) self.executions += 1 backfillearliest = lt except Full: @@ -193,11 +189,7 @@ def real_run(self): genPlugin.updateCounts(count=count, start_time=et, end_time=lt) try: - if self.pool is not None: - self.pool.apply_async(run_task, args=(genPlugin,)) - else: - self.generatorQueue.put(genPlugin) - + self.generatorQueue.put(genPlugin) logger.debug(("Worker# {0}: Put {1} MB of events in queue for sample '{2}'" + 
"with et '{3}' and lt '{4}'").format( worker_id, round((count / 1024.0 / 1024), 4), @@ -239,7 +231,3 @@ def real_run(self): else: time.sleep(self.time) self.countdown -= self.time - - -def run_task(generator_plugin): - generator_plugin.run() From e8c460b61b73064b1d251107b92056430f4f6856 Mon Sep 17 00:00:00 2001 From: Guodong Wang Date: Sat, 19 Oct 2019 08:08:50 +0800 Subject: [PATCH 24/53] make circle ci fail when test case fails (#317) --- Makefile | 21 ++++++++++----------- install_docker_compose.sh | 6 ++++++ run_tests.py | 25 +++++++++++++------------ 3 files changed, 29 insertions(+), 23 deletions(-) create mode 100644 install_docker_compose.sh diff --git a/Makefile b/Makefile index f4e6a854..9b6b6198 100644 --- a/Makefile +++ b/Makefile @@ -33,24 +33,23 @@ test_helper: docker exec -i ${EVENTGEN_TEST_IMAGE} /bin/sh -c "apk add --no-cache --update libxml2-dev libxslt-dev" @echo 'Creating dirs needed for tests' - docker exec -i ${EVENTGEN_TEST_IMAGE} /bin/sh -c "mkdir -p $(shell pwd) " || true + docker exec -i ${EVENTGEN_TEST_IMAGE} /bin/sh -c "mkdir -p $(shell pwd) " @echo 'Copying orca tree into the orca container' - docker cp . ${EVENTGEN_TEST_IMAGE}:$(shell pwd) || true + docker cp . 
${EVENTGEN_TEST_IMAGE}:$(shell pwd) @echo 'Verifying contents of pip.conf' - docker exec -i ${EVENTGEN_TEST_IMAGE} /bin/sh -c "cd $(shell pwd); pip install dist/splunk_eventgen*.tar.gz" || true + docker exec -i ${EVENTGEN_TEST_IMAGE} /bin/sh -c "cd $(shell pwd); pip install dist/splunk_eventgen*.tar.gz" @echo 'Installing test requirements' - docker exec -i ${EVENTGEN_TEST_IMAGE} /bin/sh -c "pip install --upgrade pip;pip install -r $(shell pwd)/requirements.txt" || true + docker exec -i ${EVENTGEN_TEST_IMAGE} /bin/sh -c "pip install --upgrade pip;pip install -r $(shell pwd)/requirements.txt" @echo 'Make simulated app dir and sample for modular input test' - docker exec -i ${EVENTGEN_TEST_IMAGE} /bin/sh -c "cd $(shell pwd); cd ../..; mkdir -p modinput_test_app/samples/" || true + docker exec -i ${EVENTGEN_TEST_IMAGE} /bin/sh -c "cd $(shell pwd); cd ../..; mkdir -p modinput_test_app/samples/" docker cp tests/large/sample/film.json ${EVENTGEN_TEST_IMAGE}:$(shell pwd)/../../modinput_test_app/samples @echo 'Installing docker-compose' - sudo curl -L "https://github.com/docker/compose/releases/download/1.24.0/docker-compose-Linux-x86_64" -o /usr/local/bin/docker-compose || true - sudo chmod +x /usr/local/bin/docker-compose || true + bash install_docker_compose.sh @echo 'Start container with splunk' docker-compose -f tests/large/provision/docker-compose.yml up & @@ -61,7 +60,7 @@ test_helper: run_tests: @echo 'Running the super awesome tests' - docker exec -i ${EVENTGEN_TEST_IMAGE} /bin/sh -c "cd $(shell pwd); python run_tests.py ${SMALL} ${MEDIUM} ${LARGE} ${XLARGE}" || true + docker exec -i ${EVENTGEN_TEST_IMAGE} /bin/sh -c "cd $(shell pwd); python run_tests.py ${SMALL} ${MEDIUM} ${LARGE} ${XLARGE}" test_collection_cleanup: @echo 'Collecting results' @@ -99,7 +98,7 @@ clean: docker network rm eg_network_test || true setup_eventgen: - curl -O splunk_eventgen/default/eventgen_engine.conf ${ENGINE_CONF_SOURCE} + curl -k -O splunk_eventgen/default/eventgen_engine.conf 
${ENGINE_CONF_SOURCE} eg_network: docker network create --attachable --driver bridge eg_network || true @@ -132,9 +131,9 @@ lint: ifeq ($(NEWLY_ADDED_PY_FILES), ) @echo 'No newly added python files. Skip...' else - @flake8 $(NEWLY_ADDED_PY_FILES) || true + @flake8 $(NEWLY_ADDED_PY_FILES) endif - @git diff -U0 -- '*.py' | flake8 --diff || true + @git diff -U0 -- '*.py' | flake8 --diff format: ifeq ($(CHANGED_ADDED_PY_FILES), ) diff --git a/install_docker_compose.sh b/install_docker_compose.sh new file mode 100644 index 00000000..b91cc3e7 --- /dev/null +++ b/install_docker_compose.sh @@ -0,0 +1,6 @@ +# should install docker-compose on circle ci env, but do not impact local mac os env +if [[ ! -f /usr/local/bin/docker-compose ]]; then + echo "Installing Linux docker-compose to /usr/local/bin" + sudo curl -L "https://github.com/docker/compose/releases/download/1.24.0/docker-compose-Linux-x86_64" -o /usr/local/bin/docker-compose + sudo chmod +x /usr/local/bin/docker-compose +fi diff --git a/run_tests.py b/run_tests.py index ff17152b..82835c3f 100644 --- a/run_tests.py +++ b/run_tests.py @@ -39,9 +39,6 @@ if XLARGE.lower() == 'none': XLARGE = False -# Array that will hold return codes -return_codes = [] - cov_args = [ "--cov=splunk_eventgen", "--cov-config=tests/.coveragerc", @@ -55,14 +52,21 @@ sys.path = PATH os.environ = ENV args = [SMALL, "--junitxml=tests/test-reports/tests_small_results.xml"] + cov_args - return_codes.append(pytest.main(args)) + rt = pytest.main(args) + if rt != 0: + print("There are failures in small test cases!") + sys.exit(rt) + # Run medium tests if MEDIUM: sys.path = PATH os.environ = ENV args = ["-sv", MEDIUM, "--junitxml=tests/test-reports/tests_medium_results.xml"] + cov_args - return_codes.append(pytest.main(args)) + rt = pytest.main(args) + if rt != 0: + print("There are failures in medium test cases!") + sys.exit(rt) # Commenting out other tests that aren't added yet. 
# Run large tests @@ -70,10 +74,7 @@ sys.path = PATH os.environ = ENV args = ["-sv", LARGE, "--junitxml=tests/test-reports/tests_large_results.xml"] + cov_args - return_codes.append(pytest.main(args)) - -print("What do you call a Boomerang that doesn't come back....") -# We need to ensure we return a bad exit code if the tests do not completely pass -for code in return_codes: - if int(code) != 0: - sys.exit(int(code)) + rt = pytest.main(args) + if rt != 0: + print("There are failures in large test cases!") + sys.exit(rt) From b95b59d050b7a55f04ab0c6cab98a162f5d682bc Mon Sep 17 00:00:00 2001 From: Li Wu Date: Wed, 23 Oct 2019 01:39:39 +0800 Subject: [PATCH 25/53] Add disable logging option to fix oom (#323) * Add disable logging option to fix oom * Fix modular input bug --- LICENSE | 1 - requirements.txt | 3 +- splunk_eventgen/__main__.py | 2 + .../eventgen_core_object.py | 1 + splunk_eventgen/eventgen_core.py | 14 +++++-- .../lib/logging_config/__init__.py | 40 +++---------------- splunk_eventgen/lib/requirements.txt | 1 - .../splunk_app/bin/modinput_eventgen.py | 1 + 8 files changed, 22 insertions(+), 41 deletions(-) diff --git a/LICENSE b/LICENSE index 75e7576d..42713bba 100644 --- a/LICENSE +++ b/LICENSE @@ -214,7 +214,6 @@ Apache License 2.0 ======================================================================== The following components are provided under the Apache License 2.0. See project link for details. 
- (Apache License 2.0) structlog (https://github.com/hynek/structlog/blob/master/LICENSE.apache2) (Apache License 2.0) boto3 (https://github.com/boto/boto3/blob/master/LICENSE) (Apache License 2.0) requests (https://github.com/kennethreitz/requests/blob/master/LICENSE) (Apache License 2.0) pyOpenSSL (https://github.com/pyca/pyopenssl/blob/master/LICENSE) diff --git a/requirements.txt b/requirements.txt index e232e488..cccbc283 100644 --- a/requirements.txt +++ b/requirements.txt @@ -22,5 +22,4 @@ yapf>=0.26.0 isort>=4.3.15 Flask>=1.0.3 redis==3.3.10 -structlog==19.1.0 -uuid \ No newline at end of file +uuid diff --git a/splunk_eventgen/__main__.py b/splunk_eventgen/__main__.py index 9340e2f2..e414fa28 100644 --- a/splunk_eventgen/__main__.py +++ b/splunk_eventgen/__main__.py @@ -53,6 +53,8 @@ def parse_args(): generate_subparser.add_argument( "--generator-queue-size", type=int, default=500, help="the max queue size for the " "generator queue, timer object puts all the generator tasks into this queue, default max size is 500") + generate_subparser.add_argument("--disable-logging", action="store_true", + help="disable logging") # Build subparser build_subparser = subparsers.add_parser('build', help="Will build different forms of sa-eventgen") build_subparser.add_argument("--mode", type=str, default="splunk-app", diff --git a/splunk_eventgen/eventgen_api_server/eventgen_core_object.py b/splunk_eventgen/eventgen_api_server/eventgen_core_object.py index b02ba219..9e015eec 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_core_object.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_core_object.py @@ -54,4 +54,5 @@ def _create_args(self): args.wsgi = True args.modinput_mode = False args.generator_queue_size = 1500 + args.disable_logging = True return args diff --git a/splunk_eventgen/eventgen_core.py b/splunk_eventgen/eventgen_core.py index dc8a0355..acc37176 100644 --- a/splunk_eventgen/eventgen_core.py +++ b/splunk_eventgen/eventgen_core.py @@ 
-59,7 +59,7 @@ def __init__(self, args=None): self._generator_queue_size = getattr(self.args, 'generator_queue_size', 500) if self._generator_queue_size < 0: self._generator_queue_size = 0 - self.logger.info("Set generator queue size", queue_size=self._generator_queue_size) + self.logger.info("Set generator queue size:{}".format(self._generator_queue_size)) if self.args and 'configfile' in self.args and self.args.configfile: self._load_config(self.args.configfile, args=args) @@ -240,10 +240,12 @@ def _create_generator_workers(self, workercount=20): self.workerPool = [] for worker in range(workercount): # builds a list of tuples to use the map function + disable_logging = True if self.args and self.args.disable_logging else False process = multiprocessing.Process(target=self._proc_worker_do_work, args=( self.workerQueue, self.loggingQueue, self.genconfig, + disable_logging )) self.workerPool.append(process) process.start() @@ -251,6 +253,9 @@ def _create_generator_workers(self, workercount=20): pass def _setup_loggers(self, args=None): + if args and args.disable_logging: + logger.handlers = [] + logger.addHandler(logging.NullHandler()) self.logger = logger self.loggingQueue = None if args and args.verbosity: @@ -294,7 +299,7 @@ def _generator_do_work(self, work_queue, logging_queue, output_counter=None): raise e @staticmethod - def _proc_worker_do_work(work_queue, logging_queue, config): + def _proc_worker_do_work(work_queue, logging_queue, config, disable_logging): genconfig = config stopping = genconfig['stopping'] root = logging.getLogger() @@ -304,7 +309,10 @@ def _proc_worker_do_work(work_queue, logging_queue, config): qh = logutils.queue.QueueHandler(logging_queue) root.addHandler(qh) else: - root.addHandler(logging.StreamHandler()) + if disable_logging: + root.addHandler(logging.NullHandler()) + else: + root.addHandler(logging.StreamHandler()) while not stopping: try: root.info("Checking for work") diff --git a/splunk_eventgen/lib/logging_config/__init__.py 
b/splunk_eventgen/lib/logging_config/__init__.py index 63a41d9d..5a0e1c29 100644 --- a/splunk_eventgen/lib/logging_config/__init__.py +++ b/splunk_eventgen/lib/logging_config/__init__.py @@ -1,31 +1,9 @@ import os - -import structlog import logging.config LOG_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', 'logs') DEFAULT_LOGGING_LEVEL = "DEBUG" -structlog.configure( - processors=[ - structlog.processors.UnicodeEncoder(encoding='utf-8', errors='backslashreplace'), - structlog.processors.StackInfoRenderer(), - structlog.processors.format_exc_info, - structlog.stdlib.ProcessorFormatter.wrap_for_formatter, - ], - context_class=structlog.threadlocal.wrap_dict(dict), - logger_factory=structlog.stdlib.LoggerFactory(), - wrapper_class=structlog.stdlib.BoundLogger, - cache_logger_on_first_use=True, -) - -pre_chain = [ - # Add the log level and a timestamp to the event_dict if the log entry - # is not from structlog. - structlog.stdlib.add_log_level, - structlog.processors.TimeStamper(fmt='iso'), -] - LOGGING_CONFIG = { 'version': 1, 'disable_existing_loggers': False, @@ -35,11 +13,6 @@ 'format': '%(asctime)s %(name)-15s %(levelname)-8s %(processName)-10s %(message)s', 'datefmt': '%Y-%m-%d %H:%M:%S' }, - 'json_struct': { - "()": structlog.stdlib.ProcessorFormatter, - "foreign_pre_chain": pre_chain, - "processor": structlog.processors.JSONRenderer(sort_keys=True), - } }, 'filters': { @@ -86,7 +59,7 @@ 'eventgen_metrics': { 'class': 'logging.handlers.RotatingFileHandler', 'level': DEFAULT_LOGGING_LEVEL, - 'formatter': 'json_struct', + 'formatter': 'default', 'filters': [], 'maxBytes': 1024 * 1024, 'filename': os.path.join(LOG_DIR, 'eventgen-metrics.log') @@ -94,7 +67,7 @@ 'eventgen_server': { 'class': 'logging.handlers.RotatingFileHandler', 'level': DEFAULT_LOGGING_LEVEL, - 'formatter': 'json_struct', + 'formatter': 'default', 'filters': [], 'maxBytes': 1024 * 1024, 'filename': os.path.join(LOG_DIR, 'eventgen-server.log') @@ -126,8 +99,7 @@ } 
logging.config.dictConfig(LOGGING_CONFIG) - -logger = structlog.get_logger('eventgen') -controller_logger = structlog.get_logger('eventgen_controller') -server_logger = structlog.get_logger('eventgen_server') -metrics_logger = structlog.get_logger('eventgen_metrics') +logger = logging.getLogger('eventgen') +controller_logger = logging.getLogger('eventgen_controller') +server_logger = logging.getLogger('eventgen_server') +metrics_logger = logging.getLogger('eventgen_metrics') diff --git a/splunk_eventgen/lib/requirements.txt b/splunk_eventgen/lib/requirements.txt index 24b66fae..4bfb4b0b 100644 --- a/splunk_eventgen/lib/requirements.txt +++ b/splunk_eventgen/lib/requirements.txt @@ -1,3 +1,2 @@ ujson==1.35 jinja2==2.10.1 -structlog==19.1.0 diff --git a/splunk_eventgen/splunk_app/bin/modinput_eventgen.py b/splunk_eventgen/splunk_app/bin/modinput_eventgen.py index 38299c62..d1f283a6 100644 --- a/splunk_eventgen/splunk_app/bin/modinput_eventgen.py +++ b/splunk_eventgen/splunk_app/bin/modinput_eventgen.py @@ -68,6 +68,7 @@ def create_args(self): args.wsgi = False args.log_path = make_splunkhome_path(['var', 'log', 'splunk']) args.modinput_mode = True + args.disable_logging = False return args def prepare_config(self, args): From e0a02e4bc7aca4aaef8190149e396d3310194a53 Mon Sep 17 00:00:00 2001 From: Tony Lee Date: Tue, 22 Oct 2019 10:45:24 -0700 Subject: [PATCH 26/53] Fixing api server (#322) * fixed multiple attributes * debug --- splunk_eventgen/eventgen_api_server/eventgen_core_object.py | 2 -- splunk_eventgen/eventgen_api_server/eventgen_server.py | 2 +- splunk_eventgen/eventgen_api_server/eventgen_server_api.py | 4 +++- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/splunk_eventgen/eventgen_api_server/eventgen_core_object.py b/splunk_eventgen/eventgen_api_server/eventgen_core_object.py index 9e015eec..5e329481 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_core_object.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_core_object.py @@ 
-33,13 +33,11 @@ def refresh_eventgen_core_object(self): def _create_args(self): args = argparse.Namespace() args.daemon = False - args.verbosity = None args.version = False args.backfill = None args.count = None args.devnull = False args.disableOutputQueue = False - args.end = None args.generators = None args.interval = None args.keepoutput = False diff --git a/splunk_eventgen/eventgen_api_server/eventgen_server.py b/splunk_eventgen/eventgen_api_server/eventgen_server.py index 3edeeeb2..de3f6663 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_server.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_server.py @@ -16,7 +16,7 @@ def __init__(self, *args, **kwargs): self.role = 'server' self.logger = logging.getLogger('eventgen_server') - self.logger.info('Initialized Eventgen Controller: hostname [{}]'.format(self.host)) + self.logger.info('Initialized Eventgen Server: hostname [{}]'.format(self.host)) if self.mode != 'standalone': from redis_connector import RedisConnector diff --git a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py index a8400af1..c9f925ea 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py @@ -22,7 +22,7 @@ FILE_PATH = os.path.dirname(os.path.realpath(__file__)) DEFAULT_PATH = os.path.realpath(os.path.join(FILE_PATH, "..", "default")) -SAMPLE_DIR_PATH = os.path.realpath(os.path.join(FILE_PATH, "..", "samples")) +SAMPLE_DIR_PATH = os.path.realpath(os.path.join(FILE_PATH, "..", "serverSamples")) class EventgenServerAPI(): def __init__(self, eventgen, redis_connector, host, mode='standalone'): @@ -452,6 +452,8 @@ def set_bundle(self, url): bundle_dir = self.unarchive_bundle(self.download_bundle(url)) if os.path.isdir(os.path.join(bundle_dir, "samples")): + if not os.path.exists(SAMPLE_DIR_PATH): + os.makedirs(SAMPLE_DIR_PATH) for file in glob.glob(os.path.join(bundle_dir, "samples", 
"*")): shutil.copy(file, SAMPLE_DIR_PATH) self.logger.info("Copied all samples to the sample directory.") From 523ea236d70f76bab51ab24a4a27a02df57e059d Mon Sep 17 00:00:00 2001 From: Li Wu Date: Wed, 23 Oct 2019 10:45:54 +0800 Subject: [PATCH 27/53] Py3 upgrade (#291) * init commit for python 3 migration * migrate to py3 for test * Fix import issue * Fix test cases * Fix jinja test case * Fix multiprocess issue * Fix a few warnings * Fix test case fail * Fix test case * Fix cluster test cases * Fix set_conf error * Fix shebang * Remove logutils lib from source * Fix splunkstream import error * Remove logutils from requirements.txt * Fix build spl error * Change code block indent * Fix doc anchor issue * Fix app running on Splunk 8.0 issue * Fix xrange issue introduced by develop branch * Fix merge bug * Fix bug * remove logutils * Fix makefile bug * Fix configparser bug * Change filter function --- LICENSE | 8 - Makefile | 5 +- dockerfiles/Dockerfile | 12 +- requirements.txt | 5 +- setup.py | 27 +- splunk_eventgen/__init__.py | 57 -- splunk_eventgen/__main__.py | 64 +- splunk_eventgen/default/eventgen.conf | 1 - .../eventgen_controller.py | 8 +- .../eventgen_controller_api.py | 12 +- .../eventgen_core_object.py | 3 +- .../eventgen_api_server/eventgen_server.py | 8 +- .../eventgen_server_api.py | 64 +- .../eventgen_api_server/redis_connector.py | 2 +- splunk_eventgen/eventgen_core.py | 41 +- splunk_eventgen/identitygen.py | 13 +- splunk_eventgen/lib/.gitignore | 2 - splunk_eventgen/lib/__init__.py | 0 splunk_eventgen/lib/concurrent/__init__.py | 3 - .../lib/concurrent/futures/__init__.py | 23 - .../lib/concurrent/futures/_base.py | 607 ------------------ .../lib/concurrent/futures/process.py | 359 ----------- .../lib/concurrent/futures/thread.py | 134 ---- splunk_eventgen/lib/eventgenconfig.py | 58 +- splunk_eventgen/lib/eventgenoutput.py | 8 +- splunk_eventgen/lib/eventgensamples.py | 155 ++--- splunk_eventgen/lib/eventgentimer.py | 6 +- 
splunk_eventgen/lib/eventgentoken.py | 23 +- splunk_eventgen/lib/generatorplugin.py | 16 +- splunk_eventgen/lib/logutils_src/LICENSE.txt | 26 - splunk_eventgen/lib/logutils_src/NEWS.txt | 56 -- splunk_eventgen/lib/logutils_src/PKG-INFO | 36 -- splunk_eventgen/lib/logutils_src/README.rst | 41 -- splunk_eventgen/lib/logutils_src/__init__.py | 0 splunk_eventgen/lib/logutils_src/doc/Makefile | 75 --- .../lib/logutils_src/doc/adapter.rst | 16 - .../lib/logutils_src/doc/colorize.rst | 11 - splunk_eventgen/lib/logutils_src/doc/conf.py | 194 ------ .../lib/logutils_src/doc/dictconfig.rst | 15 - splunk_eventgen/lib/logutils_src/doc/http.rst | 11 - .../lib/logutils_src/doc/index.rst | 33 - .../lib/logutils_src/doc/libraries.rst | 25 - .../lib/logutils_src/doc/queue.rst | 6 - .../lib/logutils_src/doc/redis.rst | 11 - .../lib/logutils_src/doc/testing.rst | 65 -- .../lib/logutils_src/doc/whatsnew.rst | 2 - .../lib/logutils_src/logutils/__init__.py | 196 ------ .../lib/logutils_src/logutils/adapter.py | 117 ---- .../lib/logutils_src/logutils/colorize.py | 191 ------ .../lib/logutils_src/logutils/dictconfig.py | 551 ---------------- .../lib/logutils_src/logutils/http.py | 91 --- .../lib/logutils_src/logutils/queue.py | 227 ------- .../lib/logutils_src/logutils/redis.py | 79 --- .../lib/logutils_src/logutils/testing.py | 157 ----- .../lib/logutils_src/logutils_src_setup.py | 65 -- .../lib/logutils_src/tests/mytest.py | 8 - .../lib/logutils_src/tests/test_adapter.py | 70 -- .../lib/logutils_src/tests/test_colorize.py | 24 - .../lib/logutils_src/tests/test_dictconfig.py | 550 ---------------- .../lib/logutils_src/tests/test_formatter.py | 74 --- .../lib/logutils_src/tests/test_messages.py | 34 - .../lib/logutils_src/tests/test_queue.py | 69 -- .../lib/logutils_src/tests/test_redis.py | 98 --- .../lib/logutils_src/tests/test_testing.py | 60 -- splunk_eventgen/lib/outputcounter.py | 2 +- splunk_eventgen/lib/outputplugin.py | 4 +- splunk_eventgen/lib/plugins/__init__.py | 0 
.../lib/plugins/generator/__init__.py | 0 .../lib/plugins/generator/default.py | 9 +- .../lib/plugins/generator/jinja.py | 12 +- .../generator/perdayvolumegenerator.py | 6 +- .../lib/plugins/generator/replay.py | 9 +- .../lib/plugins/generator/weblog.py | 6 +- .../lib/plugins/generator/windbag.py | 7 +- .../lib/plugins/output/__init__.py | 0 splunk_eventgen/lib/plugins/output/awss3.py | 10 +- splunk_eventgen/lib/plugins/output/counter.py | 4 +- splunk_eventgen/lib/plugins/output/devnull.py | 4 +- splunk_eventgen/lib/plugins/output/file.py | 8 +- .../lib/plugins/output/httpevent.py | 10 +- .../lib/plugins/output/httpevent_core.py | 17 +- .../lib/plugins/output/metric_httpevent.py | 15 +- .../lib/plugins/output/modinput.py | 9 +- splunk_eventgen/lib/plugins/output/s2s.py | 20 +- .../lib/plugins/output/splunkstream.py | 32 +- splunk_eventgen/lib/plugins/output/spool.py | 14 +- splunk_eventgen/lib/plugins/output/stdout.py | 6 +- .../lib/plugins/output/syslogout.py | 4 +- splunk_eventgen/lib/plugins/output/tcpout.py | 6 +- splunk_eventgen/lib/plugins/output/udpout.py | 6 +- splunk_eventgen/lib/plugins/rater/__init__.py | 0 splunk_eventgen/lib/plugins/rater/config.py | 4 +- .../lib/plugins/rater/perdayvolume.py | 6 +- .../lib/requests_futures/__init__.py | 29 - .../lib/requests_futures/sessions.py | 99 --- splunk_eventgen/lib/timeparser.py | 6 +- splunk_eventgen/logger/__init__.py | 0 splunk_eventgen/logger/logger_config.py | 17 - .../logger/requests_futures/__init__.py | 30 - .../logger/requests_futures/sessions.py | 96 --- .../__init__.py => logs/__init__} | 0 splunk_eventgen/logs/__init__.py | 0 .../splunk_app/README/eventgen.conf.spec | 2 +- .../splunk_app/bin/modinput_eventgen.py | 16 +- splunk_eventgen/splunk_app/default/app.conf | 2 +- .../splunk_app/default/distsearch.conf | 2 +- .../default/distsearch.conf.windows | 4 +- .../splunk_app/default/inputs.conf | 1 + .../splunk_app/lib/mod_input/__init__.py | 18 +- .../splunk_app/lib/mod_input/fields.py | 9 +- 
tests/large/provision/docker-compose.yml | 8 - tests/large/test_eventgen_orchestration.py | 74 +-- tests/large/test_jinja_template.py | 4 +- tests/large/test_mode_replay.py | 2 +- tests/large/test_mode_sample.py | 2 +- tests/large/test_modular_input.py | 6 +- tests/large/test_output_modinput.py | 4 +- tests/large/test_output_plugin.py | 2 +- tests/large/test_token_replacement.py | 8 +- tests/large/utils/eventgen_test_helper.py | 6 +- tests/large/utils/splunk_search_util.py | 10 +- tests/medium/plugins/test_file_output.py | 8 +- tests/medium/plugins/test_jinja_generator.py | 6 +- tests/medium/plugins/test_syslog_output.py | 4 +- .../plugins/test_syslog_output_with_header.py | 2 +- tests/medium/plugins/test_tcp_output.py | 2 +- tests/medium/plugins/test_udp_output.py | 2 +- tests/small/test_main.py | 2 +- tests/unit/test_eventgenconfig.py | 2 +- tests/unit/test_timeparser.py | 2 +- 130 files changed, 451 insertions(+), 5274 deletions(-) delete mode 100644 splunk_eventgen/__init__.py delete mode 100644 splunk_eventgen/lib/.gitignore delete mode 100644 splunk_eventgen/lib/__init__.py delete mode 100644 splunk_eventgen/lib/concurrent/__init__.py delete mode 100644 splunk_eventgen/lib/concurrent/futures/__init__.py delete mode 100644 splunk_eventgen/lib/concurrent/futures/_base.py delete mode 100644 splunk_eventgen/lib/concurrent/futures/process.py delete mode 100644 splunk_eventgen/lib/concurrent/futures/thread.py delete mode 100644 splunk_eventgen/lib/logutils_src/LICENSE.txt delete mode 100644 splunk_eventgen/lib/logutils_src/NEWS.txt delete mode 100644 splunk_eventgen/lib/logutils_src/PKG-INFO delete mode 100644 splunk_eventgen/lib/logutils_src/README.rst delete mode 100644 splunk_eventgen/lib/logutils_src/__init__.py delete mode 100644 splunk_eventgen/lib/logutils_src/doc/Makefile delete mode 100644 splunk_eventgen/lib/logutils_src/doc/adapter.rst delete mode 100644 splunk_eventgen/lib/logutils_src/doc/colorize.rst delete mode 100644 
splunk_eventgen/lib/logutils_src/doc/conf.py delete mode 100644 splunk_eventgen/lib/logutils_src/doc/dictconfig.rst delete mode 100644 splunk_eventgen/lib/logutils_src/doc/http.rst delete mode 100644 splunk_eventgen/lib/logutils_src/doc/index.rst delete mode 100644 splunk_eventgen/lib/logutils_src/doc/libraries.rst delete mode 100644 splunk_eventgen/lib/logutils_src/doc/queue.rst delete mode 100644 splunk_eventgen/lib/logutils_src/doc/redis.rst delete mode 100644 splunk_eventgen/lib/logutils_src/doc/testing.rst delete mode 100644 splunk_eventgen/lib/logutils_src/doc/whatsnew.rst delete mode 100644 splunk_eventgen/lib/logutils_src/logutils/__init__.py delete mode 100644 splunk_eventgen/lib/logutils_src/logutils/adapter.py delete mode 100644 splunk_eventgen/lib/logutils_src/logutils/colorize.py delete mode 100644 splunk_eventgen/lib/logutils_src/logutils/dictconfig.py delete mode 100644 splunk_eventgen/lib/logutils_src/logutils/http.py delete mode 100644 splunk_eventgen/lib/logutils_src/logutils/queue.py delete mode 100644 splunk_eventgen/lib/logutils_src/logutils/redis.py delete mode 100644 splunk_eventgen/lib/logutils_src/logutils/testing.py delete mode 100644 splunk_eventgen/lib/logutils_src/logutils_src_setup.py delete mode 100644 splunk_eventgen/lib/logutils_src/tests/mytest.py delete mode 100644 splunk_eventgen/lib/logutils_src/tests/test_adapter.py delete mode 100644 splunk_eventgen/lib/logutils_src/tests/test_colorize.py delete mode 100644 splunk_eventgen/lib/logutils_src/tests/test_dictconfig.py delete mode 100644 splunk_eventgen/lib/logutils_src/tests/test_formatter.py delete mode 100644 splunk_eventgen/lib/logutils_src/tests/test_messages.py delete mode 100644 splunk_eventgen/lib/logutils_src/tests/test_queue.py delete mode 100644 splunk_eventgen/lib/logutils_src/tests/test_redis.py delete mode 100644 splunk_eventgen/lib/logutils_src/tests/test_testing.py delete mode 100644 splunk_eventgen/lib/plugins/__init__.py delete mode 100644 
splunk_eventgen/lib/plugins/generator/__init__.py delete mode 100644 splunk_eventgen/lib/plugins/output/__init__.py delete mode 100644 splunk_eventgen/lib/plugins/rater/__init__.py delete mode 100755 splunk_eventgen/lib/requests_futures/__init__.py delete mode 100755 splunk_eventgen/lib/requests_futures/sessions.py delete mode 100644 splunk_eventgen/logger/__init__.py delete mode 100644 splunk_eventgen/logger/logger_config.py delete mode 100755 splunk_eventgen/logger/requests_futures/__init__.py delete mode 100755 splunk_eventgen/logger/requests_futures/sessions.py rename splunk_eventgen/{eventgen_api_server/__init__.py => logs/__init__} (100%) delete mode 100644 splunk_eventgen/logs/__init__.py diff --git a/LICENSE b/LICENSE index 42713bba..e7f86e85 100644 --- a/LICENSE +++ b/LICENSE @@ -242,13 +242,5 @@ BSD-style licenses The following components are provided under a BSD-style license. See project link for details. (BSD 2-Clause "Simplified" License) mock (https://github.com/testing-cabal/mock/blob/master/LICENSE.txt) - (BSD 3-Clause) logutils (https://opensource.org/licenses/BSD-3-Clause) (BSD 3-Clause) jinja2 (https://github.com/pallets/jinja/blob/master/LICENSE) (BSD 3-Clause) ujson(https://github.com/esnme/ultrajson/blob/master/LICENSE.txt) - -======================================================================== -PSF licenses -======================================================================== - -The following components are provided under a PSF license. See project link for details. 
- (PSD License) futures (https://github.com/agronholm/pythonfutures/blob/master/LICENSE) diff --git a/Makefile b/Makefile index 9b6b6198..19d32c6e 100644 --- a/Makefile +++ b/Makefile @@ -60,7 +60,8 @@ test_helper: run_tests: @echo 'Running the super awesome tests' - docker exec -i ${EVENTGEN_TEST_IMAGE} /bin/sh -c "cd $(shell pwd); python run_tests.py ${SMALL} ${MEDIUM} ${LARGE} ${XLARGE}" + docker exec -i ${EVENTGEN_TEST_IMAGE} /bin/sh -c "cd $(shell pwd); python3 run_tests.py ${SMALL} ${MEDIUM} ${LARGE} ${XLARGE}" + test_collection_cleanup: @echo 'Collecting results' @@ -125,7 +126,7 @@ docs: cd docs/; bundle install; bundle exec jekyll serve build_spl: clean - python -m splunk_eventgen build --destination ./ + python3 -m splunk_eventgen build --destination ./ lint: ifeq ($(NEWLY_ADDED_PY_FILES), ) diff --git a/dockerfiles/Dockerfile b/dockerfiles/Dockerfile index 19628ab5..708bc244 100644 --- a/dockerfiles/Dockerfile +++ b/dockerfiles/Dockerfile @@ -2,8 +2,8 @@ FROM redis:5.0.5-alpine RUN apk --no-cache upgrade && \ apk add --no-cache --update \ - python2-dev \ - py2-pip \ + python3 \ + python3-dev \ gcc \ libc-dev \ libffi-dev \ @@ -18,7 +18,7 @@ RUN apk --no-cache upgrade && \ g++ \ git \ curl && \ - pip install --upgrade pip && \ + pip3 install --upgrade pip && \ rm -rf /tmp/* && \ rm -rf /var/cache/apk/* && \ ssh-keygen -f /etc/ssh/ssh_host_rsa_key -N '' -t rsa && \ @@ -26,15 +26,15 @@ RUN apk --no-cache upgrade && \ mkdir -p /root/.ssh && \ chmod 0700 /root/.ssh && \ passwd -u root && \ - pip install git+git://github.com/esnme/ultrajson.git + pip3 install git+git://github.com/esnme/ultrajson.git COPY dockerfiles/sshd_config /etc/ssh/sshd_config COPY dockerfiles/entrypoint.sh /sbin/entrypoint.sh COPY dist/* /root/splunk_eventgen.tgz -RUN pip install /root/splunk_eventgen.tgz && \ +RUN pip3 install /root/splunk_eventgen.tgz && \ rm /root/splunk_eventgen.tgz EXPOSE 2222 6379 9500 RUN chmod a+x /sbin/entrypoint.sh -WORKDIR 
/usr/lib/python2.7/site-packages/splunk_eventgen +WORKDIR /usr/lib/python3.7/site-packages/splunk_eventgen ENTRYPOINT ["/sbin/entrypoint.sh"] diff --git a/requirements.txt b/requirements.txt index cccbc283..fd0bfe6a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,15 +2,13 @@ pytest==4.6.4 pytest-xdist mock pytest-cov -docker==2.7.0 +docker==3.7.3 pyOpenSSL lxml==4.3.4 pytest-mock>=1.10.4 boto3 requests>=2.18.4 requests[security] -logutils>=0.3.4.1 -futures>=3.0.5 ujson>=1.35 pyyaml httplib2 @@ -22,4 +20,5 @@ yapf>=0.26.0 isort>=4.3.15 Flask>=1.0.3 redis==3.3.10 +requests-futures==1.0.0 uuid diff --git a/setup.py b/setup.py index 9b332b88..05e49979 100644 --- a/setup.py +++ b/setup.py @@ -1,11 +1,8 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # encoding: utf-8 -from setuptools import find_packages, setup - -import splunk_eventgen - -VERSION = splunk_eventgen.__version__ +from setuptools import setup +import json try: import pypandoc @@ -14,21 +11,37 @@ long_description = open('README.md').read() +def get_version(): + """ + @return: Version Number + """ + with open("splunk_eventgen/version.json", 'rb') as fp: + json_data = json.load(fp) + version = json_data['version'] + return version + + def readme(): with open('README.md') as f: return f.read() + def get_requirements(): with open('requirements.txt') as f: requirements = f.read().splitlines() return requirements + +VERSION = get_version() + + setup( name='splunk_eventgen', version=VERSION, description='Splunk Event Generator to produce real-time, representative data', long_description=long_description, author='Splunk, Inc.', + python_requires='>3.7.0', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Software Development :: Build Tools', @@ -37,6 +50,6 @@ def get_requirements(): keywords='splunk eventgen container containers docker automation', entry_points={'console_scripts': 
["splunk_eventgen = splunk_eventgen.__main__:main"]}, include_package_data=True, - packages=find_packages(), + packages=["splunk_eventgen"], package_data={"splunk_eventgen": ['*.sh', '*.txt', '*.yml'], '': ['*.sh', '*.txt', '*.yml']}, install_requires=get_requirements()) diff --git a/splunk_eventgen/__init__.py b/splunk_eventgen/__init__.py deleted file mode 100644 index 04a51e0b..00000000 --- a/splunk_eventgen/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python2 -# encoding: utf-8 - -import json -import os - -file_location = os.path.normpath(os.path.realpath(__file__)) -VERSION_FILE = "version.json" -VERSION_LOCATION = os.path.normpath(os.path.join(file_location, '..', VERSION_FILE)) - - -def _get_version(versionfile): - """ - @param versionfile: File to get the version info from - @return: Version Number - """ - with open(VERSION_LOCATION, 'r') as fp: - json_data = json.load(fp) - version = json_data['version'] - fp.close() - return version - - -def _set_dev_version(): - """ - Write .dev at the end of version - :return: None - """ - with open(VERSION_LOCATION, 'r+') as fp: - json_data = json.load(fp) - new_version = json_data['version'].split('.dev0')[0] - new_version_write = new_version + ".dev0" - json_data['version'] = new_version_write - fp.seek(0) - fp.write(json.dumps(json_data)) - fp.close() - - -def _set_release_version(): - """ - Remove .dev at end of version if it exists - :return: None - """ - with open(VERSION_LOCATION, 'r+') as fp: - json_data = json.load(fp) - new_version = json_data['version'].split('.dev0')[0] - json_data['version'] = new_version - fp.seek(0) - fp.truncate() - fp.write(json.dumps(json_data)) - fp.close() - - -__version__ = _get_version(versionfile='version.json') - -if __name__ == "__main__": - print __version__ diff --git a/splunk_eventgen/__main__.py b/splunk_eventgen/__main__.py index e414fa28..ce66a6d0 100644 --- a/splunk_eventgen/__main__.py +++ b/splunk_eventgen/__main__.py @@ -1,8 +1,5 @@ -''' -Copyright (C) 
2005-2015 Splunk Inc. All Rights Reserved. -''' - -from __future__ import division +from splunk_eventgen.lib.logging_config import logger +from splunk_eventgen.eventgen_core import EventGenerator import argparse import errno @@ -10,16 +7,23 @@ import os import shutil import sys +import json FILE_LOCATION = os.path.dirname(os.path.abspath(__file__)) -path_prepend = os.path.join(FILE_LOCATION, 'lib') -sys.path.append(path_prepend) +VERSION_LOCATION = os.path.join(os.path.dirname(os.path.abspath(__file__)), "version.json") + + +def _get_version(): + """ + @return: Version Number + """ + with open(VERSION_LOCATION, 'rb') as fp: + json_data = json.load(fp) + version = json_data['version'] + return version -import __init__ as splunk_eventgen_init # noqa isort:skip -import eventgen_core # noqa isort:skip -from logging_config import logger # noqa isort:skip -EVENTGEN_VERSION = splunk_eventgen_init.__version__ +EVENTGEN_VERSION = _get_version() def parse_args(): @@ -75,7 +79,7 @@ def parse_args(): # Help subparser # NOTE: Keep this at the end so we can use the subparser_dict.keys() to display valid commands help_subparser = subparsers.add_parser('help', help="Display usage on a subcommand") - helpstr = "Help on a specific command, valid commands are: " + ", ".join(subparser_dict.keys() + ["help"]) + helpstr = "Help on a specific command, valid commands are: " + ", ".join(list(subparser_dict.keys()) + ["help"]) help_subparser.add_argument("command", nargs='?', default="default", help=helpstr) # add subparsers to the subparser dict, this will be used later for usage / help statements. 
subparser_dict['generate'] = generate_subparser @@ -107,7 +111,7 @@ def parse_args(): sys.exit(0) if args.subcommand == "help": - if args.command in subparser_dict.keys(): + if args.command in list(subparser_dict.keys()): subparser_dict[args.command].print_help() else: parser.print_help() @@ -125,34 +129,32 @@ def parse_args(): return args -def exclude_function(filename): +def filter_function(tarinfo): # removing any hidden . files. - last_index = filename.rfind('/') + last_index = tarinfo.name.rfind('/') if last_index != -1: - if filename[last_index + 1:].startswith('.'): - return True - if filename.endswith('.pyo') or filename.endswith('.pyc'): - return True + if tarinfo.name[last_index + 1:].startswith('.'): + return None + if tarinfo.name.endswith('.pyo') or tarinfo.name.endswith('.pyc') or '/splunk_app' in tarinfo.name: + return None else: - return False + return tarinfo def make_tarfile(output_filename, source_dir): import tarfile with tarfile.open(output_filename, "w:gz") as tar: - tar.add(source_dir, arcname=os.path.basename(source_dir), exclude=exclude_function) + tar.add(source_dir, arcname=os.path.basename(source_dir), filter=filter_function) def build_splunk_app(dest, source=os.getcwd(), remove=True): - import imp cwd = os.getcwd() os.chdir(source) directory = os.path.join(dest, 'SA-Eventgen') target_file = os.path.join(dest, 'sa_eventgen_{}.spl'.format(EVENTGEN_VERSION)) - module_file, module_path, module_description = imp.find_module('splunk_eventgen') - splunk_app = os.path.join(module_path, 'splunk_app') + splunk_app = os.path.join(FILE_LOCATION, 'splunk_app') splunk_app_samples = os.path.join(splunk_app, "samples") - shutil.copytree(os.path.join(module_path, "samples"), splunk_app_samples) + shutil.copytree(os.path.join(FILE_LOCATION, "samples"), splunk_app_samples) try: shutil.copytree(splunk_app, directory) except OSError as e: @@ -163,9 +165,9 @@ def build_splunk_app(dest, source=os.getcwd(), remove=True): else: raise directory_lib_dir = 
os.path.join(directory, 'lib', 'splunk_eventgen') - shutil.copytree(module_path, directory_lib_dir) + shutil.copytree(FILE_LOCATION, directory_lib_dir) directory_default_dir = os.path.join(directory, 'default', 'eventgen.conf') - eventgen_conf = os.path.join(module_path, 'default', 'eventgen.conf') + eventgen_conf = os.path.join(FILE_LOCATION, 'default', 'eventgen.conf') shutil.copyfile(eventgen_conf, directory_default_dir) # install 3rd lib dependencies @@ -208,18 +210,18 @@ def main(): args = parse_args() args.verbosity = convert_verbosity_count_to_logging_level(args.verbosity) if args.subcommand == "generate": - eventgen = eventgen_core.EventGenerator(args=args) + eventgen = EventGenerator(args=args) eventgen.start() elif args.subcommand == "service": env_vars = gather_env_vars(args) if args.role == "controller": - from eventgen_api_server.eventgen_controller import EventgenController + from splunk_eventgen.eventgen_api_server.eventgen_controller import EventgenController EventgenController(env_vars=env_vars).app_run() elif args.role == "server": - from eventgen_api_server.eventgen_server import EventgenServer + from splunk_eventgen.eventgen_api_server.eventgen_server import EventgenServer EventgenServer(env_vars=env_vars, mode="cluster").app_run() elif args.role == "standalone": - from eventgen_api_server.eventgen_server import EventgenServer + from splunk_eventgen.eventgen_api_server.eventgen_server import EventgenServer EventgenServer(env_vars=env_vars, mode="standalone").app_run() elif args.subcommand == "build": if not args.destination: diff --git a/splunk_eventgen/default/eventgen.conf b/splunk_eventgen/default/eventgen.conf index 421158fb..0b2b6107 100644 --- a/splunk_eventgen/default/eventgen.conf +++ b/splunk_eventgen/default/eventgen.conf @@ -1,4 +1,3 @@ -# Copyright (C) 2005-2015 Splunk Inc. All Rights Reserved. # DO NOT EDIT THIS FILE! # Please make all changes to files in $SPLUNK_HOME/etc/apps/SA-Eventgen/local. 
# To make changes, copy the section/stanza you want to change from $SPLUNK_HOME/etc/apps/SA-Eventgen/default diff --git a/splunk_eventgen/eventgen_api_server/eventgen_controller.py b/splunk_eventgen/eventgen_api_server/eventgen_controller.py index 1b2ae3cd..25f930d1 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_controller.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_controller.py @@ -6,14 +6,14 @@ import time import threading -from eventgen_controller_api import EventgenControllerAPI -from redis_connector import RedisConnector +from splunk_eventgen.eventgen_api_server.eventgen_controller_api import EventgenControllerAPI +from splunk_eventgen.eventgen_api_server.redis_connector import RedisConnector FILE_PATH = os.path.dirname(os.path.realpath(__file__)) LOG_PATH = os.path.join(FILE_PATH, '..', 'logs') -class EventgenController(): +class EventgenController: def __init__(self, *args, **kwargs): self.env_vars = kwargs.get('env_vars') @@ -25,7 +25,7 @@ def __init__(self, *args, **kwargs): self._setup_loggers() self.connections_healthcheck() - self.logger = logging.getLogger('eventgen_server') + self.logger = logging.getLogger('eventgen_controller') self.logger.info('Initialized Eventgen Controller: hostname [{}]'.format(self.host)) self.app = self._create_app() diff --git a/splunk_eventgen/eventgen_api_server/eventgen_controller_api.py b/splunk_eventgen/eventgen_api_server/eventgen_controller_api.py index 4988d3ad..4b060df1 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_controller_api.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_controller_api.py @@ -1,25 +1,19 @@ -import atexit from flask import Blueprint, Response, request -import os -import socket import time import json -import requests import logging import uuid INTERNAL_ERROR_RESPONSE = json.dumps({"message": "Internal Error Occurred"}) -class EventgenControllerAPI(): +class EventgenControllerAPI: def __init__(self, redis_connector, host): self.bp = 
self.__create_blueprint() self.redis_connector = redis_connector self.host = host - self.logger = logging.getLogger("eventgen_controller") self.logger.info("Initialized the EventgenControllerAPI Blueprint") - self.interval = 0.001 self.server_responses = {} @@ -47,13 +41,13 @@ def gather_response(target_job, message_uuid, response_number_target=0): else: countdown = 5 for i in range(0, int(countdown / self.interval)): - response_num = len(self.server_responses.get(message_uuid, {}).keys()) + response_num = len(list(self.server_responses.get(message_uuid, {}).keys())) if response_num == response_number_target: break else: time.sleep(self.interval) message = self.redis_connector.pubsub.get_message() - if message and type(message.get('data')) == str: + if message and type(message.get('data')) == bytes: server_response = json.loads(message.get('data')) self.logger.info(server_response) response_message_uuid = server_response.get('message_uuid') diff --git a/splunk_eventgen/eventgen_api_server/eventgen_core_object.py b/splunk_eventgen/eventgen_api_server/eventgen_core_object.py index 5e329481..87c89a26 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_core_object.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_core_object.py @@ -7,7 +7,8 @@ FILE_PATH = os.path.dirname(os.path.realpath(__file__)) CUSTOM_CONFIG_PATH = os.path.realpath(os.path.join(FILE_PATH, "..", "default", "eventgen_wsgi.conf")) -class EventgenCoreObject(): + +class EventgenCoreObject: def __init__(self): self.logger = logging.getLogger('eventgen_server') self.eventgen_core_object = eventgen_core.EventGenerator(self._create_args()) diff --git a/splunk_eventgen/eventgen_api_server/eventgen_server.py b/splunk_eventgen/eventgen_api_server/eventgen_server.py index de3f6663..f95493c8 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_server.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_server.py @@ -3,11 +3,11 @@ import socket import logging -from eventgen_server_api import 
EventgenServerAPI -import eventgen_core_object +from splunk_eventgen.eventgen_api_server.eventgen_server_api import EventgenServerAPI +from splunk_eventgen.eventgen_api_server import eventgen_core_object -class EventgenServer(): +class EventgenServer: def __init__(self, *args, **kwargs): self.eventgen = eventgen_core_object.EventgenCoreObject() self.mode = kwargs.get('mode', 'standalone') @@ -19,7 +19,7 @@ def __init__(self, *args, **kwargs): self.logger.info('Initialized Eventgen Server: hostname [{}]'.format(self.host)) if self.mode != 'standalone': - from redis_connector import RedisConnector + from splunk_eventgen.eventgen_api_server.redis_connector import RedisConnector self.redis_connector = RedisConnector(host=self.env_vars.get('REDIS_HOST'), port=self.env_vars.get('REDIS_PORT')) self.redis_connector.register_myself(hostname=self.host, role=self.role) self.app = self._create_app() diff --git a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py index c9f925ea..d9133d45 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py @@ -2,10 +2,9 @@ from flask import Response, request import socket import json -import ConfigParser +import configparser import os import time -import requests import zipfile import tarfile import glob @@ -13,10 +12,9 @@ import collections import logging import requests -from requests.packages.urllib3.util.retry import Retry import threading -import eventgen_core_object +from splunk_eventgen.eventgen_api_server import eventgen_core_object INTERNAL_ERROR_RESPONSE = json.dumps({"message": "Internal Error Occurred"}) @@ -24,11 +22,11 @@ DEFAULT_PATH = os.path.realpath(os.path.join(FILE_PATH, "..", "default")) SAMPLE_DIR_PATH = os.path.realpath(os.path.join(FILE_PATH, "..", "serverSamples")) -class EventgenServerAPI(): + +class EventgenServerAPI: def __init__(self, eventgen, redis_connector, 
host, mode='standalone'): self.bp = self._create_blueprint() self.eventgen = eventgen - self.logger = logging.getLogger('eventgen_server') self.logger.info("Initialized the EventgenServerAPI Blueprint") @@ -48,7 +46,7 @@ def _channel_listener(self): def start_listening(self): while True: message = self.redis_connector.pubsub.get_message() - if message and type(message.get('data')) == str: + if message and type(message.get('data')) == bytes: data = json.loads(message.get('data')) self.logger.info("Message Recieved {}".format(message['data'])) if data['target'] == 'all' or data['target'] == self.host: @@ -107,7 +105,6 @@ def _delegate_jobs(self, job, request_method, body, message_uuid): message = self.format_message('healthcheck', request_method, response=response, message_uuid=message_uuid) self.redis_connector.message_connection.publish(self.redis_connector.controller_channel, message) - def _create_blueprint(self): bp = flask.Blueprint('server_api', __name__) @@ -244,7 +241,7 @@ def get_index(self): def get_conf(self): response = collections.OrderedDict() if self.eventgen.configured: - config = ConfigParser.ConfigParser() + config = configparser.RawConfigParser() config.optionxform = str config_path = self.eventgen.configfile if os.path.isfile(config_path): @@ -256,16 +253,16 @@ def get_conf(self): return response def set_conf(self, request_body): - config = ConfigParser.ConfigParser({}, collections.OrderedDict) + config = configparser.RawConfigParser({}, collections.OrderedDict) config.optionxform = str - for sample in request_body.iteritems(): + for sample in request_body.items(): config.add_section(sample[0]) - for pair in sample[1].iteritems(): + for pair in sample[1].items(): value = pair[1] if type(value) == dict: value = json.dumps(value) - config.set(sample[0], pair[0], value) + config.set(sample[0], pair[0], str(value)) with open(eventgen_core_object.CUSTOM_CONFIG_PATH, 'w+') as conf_content: config.write(conf_content) @@ -275,12 +272,12 @@ def 
set_conf(self, request_body): def edit_conf(self, request_body): conf_dict = self.get_conf() - for stanza, kv_pairs in request_body.iteritems(): - for key, value in kv_pairs.iteritems(): - if stanza not in conf_dict.keys(): + for stanza, kv_pairs in request_body.items(): + for key, value in kv_pairs.items(): + if stanza not in conf_dict: conf_dict[stanza] = {} if stanza == "global" and key == "index": - for stanza, kv_pairs in conf_dict.iteritems(): + for stanza, kv_pairs in conf_dict.items(): conf_dict[stanza]["index"] = value conf_dict[stanza][key] = value @@ -357,8 +354,8 @@ def get_volume(self): config = self.get_conf() total_volume = 0.0 volume_distribution = {} - for stanza in config.keys(): - if isinstance(config[stanza], dict) and "perDayVolume" in config[stanza].keys(): + for stanza in list(config.keys()): + if isinstance(config[stanza], dict) and "perDayVolume" in list(config[stanza].keys()): total_volume += float(config[stanza]["perDayVolume"]) volume_distribution[stanza] = float(config[stanza]["perDayVolume"]) @@ -372,19 +369,19 @@ def set_volume(self, target_volume): conf_dict = self.get_conf() if self.get_volume()['perDayVolume'] != 0: ratio = float(target_volume) / float(self.total_volume) - for stanza, kv_pair in conf_dict.iteritems(): + for stanza, kv_pair in conf_dict.items(): if isinstance(kv_pair, dict): - if '.*' not in stanza and "perDayVolume" in kv_pair.keys(): + if '.*' not in stanza and "perDayVolume" in list(kv_pair.keys()): conf_dict[stanza]["perDayVolume"] = round(float(conf_dict[stanza]["perDayVolume"]) * ratio, 2) else: # If there is no total_volume existing, divide the volume equally into stanzas - stanza_num = len(conf_dict.keys()) - if '.*' in conf_dict.keys(): + stanza_num = len(list(conf_dict.keys())) + if '.*' in conf_dict: stanza_num -= 1 - if 'global' in conf_dict.keys(): + if 'global' in conf_dict: stanza_num -= 1 divided_volume = float(target_volume) / stanza_num - for stanza, kv_pair in conf_dict.iteritems(): + for stanza, 
kv_pair in conf_dict.items(): if isinstance(kv_pair, dict) and stanza != 'global' and '.*' not in stanza: conf_dict[stanza]["perDayVolume"] = divided_volume @@ -460,14 +457,13 @@ def set_bundle(self, url): if os.path.isfile(os.path.join(bundle_dir, "default", "eventgen.conf")): self.eventgen.configured = False - config = ConfigParser.ConfigParser() + config = configparser.RawConfigParser() config.optionxform = str config.read(os.path.join(bundle_dir, "default", "eventgen.conf")) config_dict = {s: collections.OrderedDict(config.items(s)) for s in config.sections()} self.set_conf(config_dict) self.eventgen.configured = True self.logger.info("Configured Eventgen with the downloaded bundle.") - def download_bundle(self, url): bundle_path = os.path.join(DEFAULT_PATH, "eg-bundle.tgz") @@ -517,17 +513,17 @@ def unarchive_bundle(self, path): def clean_bundle_conf(self): conf_dict = self.get_conf() - if ".*" not in conf_dict.keys(): + if ".*" not in conf_dict: conf_dict['.*'] = {} # 1. Remove sampleDir from individual stanza and set a global sampleDir # 2. 
Change token sample path to a local sample path - for stanza, kv_pair in conf_dict.iteritems(): + for stanza, kv_pair in conf_dict.items(): if stanza != ".*": if 'sampleDir' in kv_pair: del kv_pair['sampleDir'] - for key, value in kv_pair.iteritems(): + for key, value in kv_pair.items(): if 'replacementType' in key and value in ['file', 'mvfile', 'seqfile']: token_num = key[key.find('.')+1:key.rfind('.')] if not token_num: continue @@ -541,9 +537,9 @@ def clean_bundle_conf(self): def setup_http(self, data): if data.get("servers"): conf_dict = self.get_conf() - if 'global' not in conf_dict.keys(): + if 'global' not in conf_dict: conf_dict['global'] = {} - for stanza, kv_pair in conf_dict.iteritems(): + for stanza, kv_pair in conf_dict.items(): if 'outputMode' in kv_pair: del kv_pair['outputMode'] if 'httpeventServers' in kv_pair: @@ -611,9 +607,9 @@ def create_new_hec_key(hostname): break conf_dict = self.get_conf() - if 'global' not in conf_dict.keys(): + if 'global' not in conf_dict: conf_dict['global'] = {} - for stanza, kv_pair in conf_dict.iteritems(): + for stanza, kv_pair in conf_dict.items(): if 'outputMode' in kv_pair: del kv_pair['outputMode'] if 'httpeventServers' in kv_pair: diff --git a/splunk_eventgen/eventgen_api_server/redis_connector.py b/splunk_eventgen/eventgen_api_server/redis_connector.py index 660add01..8ca3f86f 100644 --- a/splunk_eventgen/eventgen_api_server/redis_connector.py +++ b/splunk_eventgen/eventgen_api_server/redis_connector.py @@ -2,8 +2,8 @@ import logging import time -class RedisConnector(): +class RedisConnector: def __init__(self, host, port): self.host = host self.port = port diff --git a/splunk_eventgen/eventgen_core.py b/splunk_eventgen/eventgen_core.py index acc37176..32d3763f 100644 --- a/splunk_eventgen/eventgen_core.py +++ b/splunk_eventgen/eventgen_core.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # encoding: utf-8 import imp import logging @@ -6,29 +6,16 @@ import os import sys import time +from 
queue import Empty, Queue import signal -from Queue import Empty, Queue from threading import Thread import multiprocessing -from lib.eventgenconfig import Config -from lib.eventgenexceptions import PluginNotLoaded -from lib.eventgentimer import Timer -from lib.outputcounter import OutputCounter -from lib.logging_config import logger - -lib_path_prepend = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lib') -sys.path.insert(0, lib_path_prepend) -# Since i'm including a new library but external sources may not have access to pip (like splunk embeded), I need to -# be able to load this library directly from src if it's not installed. -try: - import logutils - import logutils.handlers -except ImportError: - path_prepend = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lib', 'logutils_src') - sys.path.append(path_prepend) - import logutils - import logutils.queue +from splunk_eventgen.lib.eventgenconfig import Config +from splunk_eventgen.lib.eventgenexceptions import PluginNotLoaded +from splunk_eventgen.lib.eventgentimer import Timer +from splunk_eventgen.lib.outputcounter import OutputCounter +from splunk_eventgen.lib.logging_config import logger FILE_PATH = os.path.dirname(os.path.realpath(__file__)) EVENTGEN_DIR = os.path.realpath(os.path.join(FILE_PATH, "..")) @@ -210,8 +197,8 @@ def _create_generator_pool(self, workercount=20): else: # TODO crash caused by logging Thread https://github.com/splunk/eventgen/issues/217 self.loggingQueue = self.manager.Queue() - self.logging_pool = Thread(target=self.logger_thread, args=(self.loggingQueue, ), name="LoggerThread") - self.logging_pool.start() + self.logging_thread = Thread(target=self.logger_thread, args=(self.loggingQueue, ), name="LoggerThread") + self.logging_thread.start() # since we're now in multiprocess, we need to use better queues. 
self.workerQueue = multiprocessing.JoinableQueue(maxsize=self._generator_queue_size) self.genconfig = self.manager.dict() @@ -306,7 +293,7 @@ def _proc_worker_do_work(work_queue, logging_queue, config, disable_logging): root.setLevel(logging.DEBUG) if logging_queue is not None: # TODO https://github.com/splunk/eventgen/issues/217 - qh = logutils.queue.QueueHandler(logging_queue) + qh = logging.handlers.QueueHandler(logging_queue) root.addHandler(qh) else: if disable_logging: @@ -336,7 +323,6 @@ def logger_thread(self, loggingQueue): while not self.stopping: try: record = loggingQueue.get(timeout=10) - logger = logging.getLogger(record.name) logger.handle(record) loggingQueue.task_done() except Empty: @@ -382,11 +368,16 @@ def _initializePlugins(self, dirname, plugins, plugintype, name=None): try: # Import the module # module = imp.load_source(base, filename) + mod_name, mod_path, mod_desc = imp.find_module(base, [dirname]) # TODO: Probably need to adjust module.load() to be added later so this can be pickled. module = imp.load_module(base, mod_name, mod_path, mod_desc) plugin = module.load() + # spec = importlib.util.spec_from_file_location(base, filename) + # plugin = importlib.util.module_from_spec(spec) + # spec.loader.exec_module(plugin) + # set plugin to something like output.file or generator.default pluginname = plugintype + '.' 
+ base plugins[pluginname] = plugin @@ -415,7 +406,7 @@ def _initializePlugins(self, dirname, plugins, plugintype, name=None): except ValueError: self.logger.error("Error loading plugin '%s' of type '%s'" % (base, plugintype)) except ImportError as ie: - self.logger.warning("Could not load plugin: %s, skipping" % mod_name.name) + self.logger.warning("Could not load plugin: %s, skipping" % base) self.logger.exception(ie) except Exception as e: self.logger.exception(str(e)) diff --git a/splunk_eventgen/identitygen.py b/splunk_eventgen/identitygen.py index f95a1047..4d5fb6ad 100644 --- a/splunk_eventgen/identitygen.py +++ b/splunk_eventgen/identitygen.py @@ -24,18 +24,18 @@ def __init__(self): self.last = [i.split()[0] for i in open("%s/samples/dist.all.last" % BASE_PATH, "rb").readlines()] except IOError: self.last = [ - (''.join(random.choice(ascii_uppercase) for i in xrange(random.randint(4, 12)))) for i in xrange(100)] + (''.join(random.choice(ascii_uppercase) for i in range(random.randint(4, 12)))) for i in range(100)] try: self.female_first = [ i.split()[0] for i in open("%s/samples/dist.female.first" % BASE_PATH, "rb").readlines()] except IOError: self.female_first = [ - (''.join(random.choice(ascii_uppercase) for i in xrange(random.randint(4, 12)))) for i in xrange(100)] + (''.join(random.choice(ascii_uppercase) for i in range(random.randint(4, 12)))) for i in range(100)] try: self.male_first = [i.split()[0] for i in open("%s/samples/dist.male.first" % BASE_PATH, "rb").readlines()] except IOError: self.male_first = [ - (''.join(random.choice(ascii_uppercase) for i in xrange(random.randint(4, 12)))) for i in xrange(100)] + (''.join(random.choice(ascii_uppercase) for i in range(random.randint(4, 12)))) for i in range(100)] def generate(self, count): self.identities = [] @@ -44,7 +44,7 @@ def generate(self, count): len_last = len(self.last) len_male_first = len(self.male_first) len_female_first = len(self.female_first) - for i in xrange(count): + for i in 
range(count): gender = random.choice(["m", "f"]) last_name = self.last[int(random.triangular(0, len_last, 0))] if gender == "m": @@ -92,8 +92,7 @@ def setEmail(self, new_email): else: raise ValueError - def getFile(self, count=0, filename="../default", fields=["username", "first_name", "last_name"], fieldnames=[ - "username", "first_name", "last_name"]): + def getFile(self, count=0, filename="../default", fields=["username", "first_name", "last_name"], fieldnames=["username", "first_name", "last_name"]): """ Returns a rest endpoint to download a csv file """ @@ -113,7 +112,7 @@ def getFile(self, count=0, filename="../default", fields=["username", "first_nam with open(filename, "wb") as lookupFile: file = csv.writer(lookupFile) file.writerow(fieldnames) - for i in xrange(min(count + 1, len(self.identities))): # + 1 to account for the header + for i in range(min(count + 1, len(self.identities))): # + 1 to account for the header row = [] identity = self.identities[i] for field in fields: diff --git a/splunk_eventgen/lib/.gitignore b/splunk_eventgen/lib/.gitignore deleted file mode 100644 index 57f4e4fc..00000000 --- a/splunk_eventgen/lib/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.pyc -plugins/generator/cweblog.py \ No newline at end of file diff --git a/splunk_eventgen/lib/__init__.py b/splunk_eventgen/lib/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/splunk_eventgen/lib/concurrent/__init__.py b/splunk_eventgen/lib/concurrent/__init__.py deleted file mode 100644 index b36383a6..00000000 --- a/splunk_eventgen/lib/concurrent/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from pkgutil import extend_path - -__path__ = extend_path(__path__, __name__) diff --git a/splunk_eventgen/lib/concurrent/futures/__init__.py b/splunk_eventgen/lib/concurrent/futures/__init__.py deleted file mode 100644 index 428b14bd..00000000 --- a/splunk_eventgen/lib/concurrent/futures/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2009 Brian Quinlan. 
All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Execute computations asynchronously using threads or processes.""" - -__author__ = 'Brian Quinlan (brian@sweetapp.com)' - -from concurrent.futures._base import (FIRST_COMPLETED, - FIRST_EXCEPTION, - ALL_COMPLETED, - CancelledError, - TimeoutError, - Future, - Executor, - wait, - as_completed) -from concurrent.futures.thread import ThreadPoolExecutor - -try: - from concurrent.futures.process import ProcessPoolExecutor -except ImportError: - # some platforms don't have multiprocessing - pass diff --git a/splunk_eventgen/lib/concurrent/futures/_base.py b/splunk_eventgen/lib/concurrent/futures/_base.py deleted file mode 100644 index 2936c46b..00000000 --- a/splunk_eventgen/lib/concurrent/futures/_base.py +++ /dev/null @@ -1,607 +0,0 @@ -# Copyright 2009 Brian Quinlan. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -import collections -import logging -import threading -import itertools -import time - -__author__ = 'Brian Quinlan (brian@sweetapp.com)' - -FIRST_COMPLETED = 'FIRST_COMPLETED' -FIRST_EXCEPTION = 'FIRST_EXCEPTION' -ALL_COMPLETED = 'ALL_COMPLETED' -_AS_COMPLETED = '_AS_COMPLETED' - -# Possible future states (for internal use by the futures package). -PENDING = 'PENDING' -RUNNING = 'RUNNING' -# The future was cancelled by the user... -CANCELLED = 'CANCELLED' -# ...and _Waiter.add_cancelled() was called by a worker. -CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED' -FINISHED = 'FINISHED' - -_FUTURE_STATES = [ - PENDING, - RUNNING, - CANCELLED, - CANCELLED_AND_NOTIFIED, - FINISHED -] - -_STATE_TO_DESCRIPTION_MAP = { - PENDING: "pending", - RUNNING: "running", - CANCELLED: "cancelled", - CANCELLED_AND_NOTIFIED: "cancelled", - FINISHED: "finished" -} - -# Logger for internal use by the futures package. 
-LOGGER = logging.getLogger("concurrent.futures") - -class Error(Exception): - """Base class for all future-related exceptions.""" - pass - -class CancelledError(Error): - """The Future was cancelled.""" - pass - -class TimeoutError(Error): - """The operation exceeded the given deadline.""" - pass - -class _Waiter(object): - """Provides the event that wait() and as_completed() block on.""" - def __init__(self): - self.event = threading.Event() - self.finished_futures = [] - - def add_result(self, future): - self.finished_futures.append(future) - - def add_exception(self, future): - self.finished_futures.append(future) - - def add_cancelled(self, future): - self.finished_futures.append(future) - -class _AsCompletedWaiter(_Waiter): - """Used by as_completed().""" - - def __init__(self): - super(_AsCompletedWaiter, self).__init__() - self.lock = threading.Lock() - - def add_result(self, future): - with self.lock: - super(_AsCompletedWaiter, self).add_result(future) - self.event.set() - - def add_exception(self, future): - with self.lock: - super(_AsCompletedWaiter, self).add_exception(future) - self.event.set() - - def add_cancelled(self, future): - with self.lock: - super(_AsCompletedWaiter, self).add_cancelled(future) - self.event.set() - -class _FirstCompletedWaiter(_Waiter): - """Used by wait(return_when=FIRST_COMPLETED).""" - - def add_result(self, future): - super(_FirstCompletedWaiter, self).add_result(future) - self.event.set() - - def add_exception(self, future): - super(_FirstCompletedWaiter, self).add_exception(future) - self.event.set() - - def add_cancelled(self, future): - super(_FirstCompletedWaiter, self).add_cancelled(future) - self.event.set() - -class _AllCompletedWaiter(_Waiter): - """Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED).""" - - def __init__(self, num_pending_calls, stop_on_exception): - self.num_pending_calls = num_pending_calls - self.stop_on_exception = stop_on_exception - self.lock = threading.Lock() - 
super(_AllCompletedWaiter, self).__init__() - - def _decrement_pending_calls(self): - with self.lock: - self.num_pending_calls -= 1 - if not self.num_pending_calls: - self.event.set() - - def add_result(self, future): - super(_AllCompletedWaiter, self).add_result(future) - self._decrement_pending_calls() - - def add_exception(self, future): - super(_AllCompletedWaiter, self).add_exception(future) - if self.stop_on_exception: - self.event.set() - else: - self._decrement_pending_calls() - - def add_cancelled(self, future): - super(_AllCompletedWaiter, self).add_cancelled(future) - self._decrement_pending_calls() - -class _AcquireFutures(object): - """A context manager that does an ordered acquire of Future conditions.""" - - def __init__(self, futures): - self.futures = sorted(futures, key=id) - - def __enter__(self): - for future in self.futures: - future._condition.acquire() - - def __exit__(self, *args): - for future in self.futures: - future._condition.release() - -def _create_and_install_waiters(fs, return_when): - if return_when == _AS_COMPLETED: - waiter = _AsCompletedWaiter() - elif return_when == FIRST_COMPLETED: - waiter = _FirstCompletedWaiter() - else: - pending_count = sum( - f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs) - - if return_when == FIRST_EXCEPTION: - waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True) - elif return_when == ALL_COMPLETED: - waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False) - else: - raise ValueError("Invalid return condition: %r" % return_when) - - for f in fs: - f._waiters.append(waiter) - - return waiter - -def as_completed(fs, timeout=None): - """An iterator over the given futures that yields each as it completes. - - Args: - fs: The sequence of Futures (possibly created by different Executors) to - iterate over. - timeout: The maximum number of seconds to wait. If None, then there - is no limit on the wait time. 
- - Returns: - An iterator that yields the given Futures as they complete (finished or - cancelled). If any given Futures are duplicated, they will be returned - once. - - Raises: - TimeoutError: If the entire result iterator could not be generated - before the given timeout. - """ - if timeout is not None: - end_time = timeout + time.time() - - fs = set(fs) - with _AcquireFutures(fs): - finished = set( - f for f in fs - if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) - pending = fs - finished - waiter = _create_and_install_waiters(fs, _AS_COMPLETED) - - try: - for future in finished: - yield future - - while pending: - if timeout is None: - wait_timeout = None - else: - wait_timeout = end_time - time.time() - if wait_timeout < 0: - raise TimeoutError( - '%d (of %d) futures unfinished' % ( - len(pending), len(fs))) - - waiter.event.wait(wait_timeout) - - with waiter.lock: - finished = waiter.finished_futures - waiter.finished_futures = [] - waiter.event.clear() - - for future in finished: - yield future - pending.remove(future) - - finally: - for f in fs: - with f._condition: - f._waiters.remove(waiter) - -DoneAndNotDoneFutures = collections.namedtuple( - 'DoneAndNotDoneFutures', 'done not_done') -def wait(fs, timeout=None, return_when=ALL_COMPLETED): - """Wait for the futures in the given sequence to complete. - - Args: - fs: The sequence of Futures (possibly created by different Executors) to - wait upon. - timeout: The maximum number of seconds to wait. If None, then there - is no limit on the wait time. - return_when: Indicates when this function should return. The options - are: - - FIRST_COMPLETED - Return when any future finishes or is - cancelled. - FIRST_EXCEPTION - Return when any future finishes by raising an - exception. If no future raises an exception - then it is equivalent to ALL_COMPLETED. - ALL_COMPLETED - Return when all futures finish or are cancelled. - - Returns: - A named 2-tuple of sets. 
The first set, named 'done', contains the - futures that completed (is finished or cancelled) before the wait - completed. The second set, named 'not_done', contains uncompleted - futures. - """ - with _AcquireFutures(fs): - done = set(f for f in fs - if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) - not_done = set(fs) - done - - if (return_when == FIRST_COMPLETED) and done: - return DoneAndNotDoneFutures(done, not_done) - elif (return_when == FIRST_EXCEPTION) and done: - if any(f for f in done - if not f.cancelled() and f.exception() is not None): - return DoneAndNotDoneFutures(done, not_done) - - if len(done) == len(fs): - return DoneAndNotDoneFutures(done, not_done) - - waiter = _create_and_install_waiters(fs, return_when) - - waiter.event.wait(timeout) - for f in fs: - with f._condition: - f._waiters.remove(waiter) - - done.update(waiter.finished_futures) - return DoneAndNotDoneFutures(done, set(fs) - done) - -class Future(object): - """Represents the result of an asynchronous computation.""" - - def __init__(self): - """Initializes the future. Should not be called by clients.""" - self._condition = threading.Condition() - self._state = PENDING - self._result = None - self._exception = None - self._traceback = None - self._waiters = [] - self._done_callbacks = [] - - def _invoke_callbacks(self): - for callback in self._done_callbacks: - try: - callback(self) - except Exception: - LOGGER.exception('exception calling callback for %r', self) - - def __repr__(self): - with self._condition: - if self._state == FINISHED: - if self._exception: - return '' % ( - hex(id(self)), - _STATE_TO_DESCRIPTION_MAP[self._state], - self._exception.__class__.__name__) - else: - return '' % ( - hex(id(self)), - _STATE_TO_DESCRIPTION_MAP[self._state], - self._result.__class__.__name__) - return '' % ( - hex(id(self)), - _STATE_TO_DESCRIPTION_MAP[self._state]) - - def cancel(self): - """Cancel the future if possible. - - Returns True if the future was cancelled, False otherwise. 
A future - cannot be cancelled if it is running or has already completed. - """ - with self._condition: - if self._state in [RUNNING, FINISHED]: - return False - - if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: - return True - - self._state = CANCELLED - self._condition.notify_all() - - self._invoke_callbacks() - return True - - def cancelled(self): - """Return True if the future has cancelled.""" - with self._condition: - return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED] - - def running(self): - """Return True if the future is currently executing.""" - with self._condition: - return self._state == RUNNING - - def done(self): - """Return True of the future was cancelled or finished executing.""" - with self._condition: - return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED] - - def __get_result(self): - if self._exception: - raise type(self._exception), self._exception, self._traceback - else: - return self._result - - def add_done_callback(self, fn): - """Attaches a callable that will be called when the future finishes. - - Args: - fn: A callable that will be called with this future as its only - argument when the future completes or is cancelled. The callable - will always be called by a thread in the same process in which - it was added. If the future has already completed or been - cancelled then the callable will be called immediately. These - callables are called in the order that they were added. - """ - with self._condition: - if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]: - self._done_callbacks.append(fn) - return - fn(self) - - def result(self, timeout=None): - """Return the result of the call that the future represents. - - Args: - timeout: The number of seconds to wait for the result if the future - isn't done. If None, then there is no limit on the wait time. - - Returns: - The result of the call that the future represents. - - Raises: - CancelledError: If the future was cancelled. 
- TimeoutError: If the future didn't finish executing before the given - timeout. - Exception: If the call raised then that exception will be raised. - """ - with self._condition: - if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: - raise CancelledError() - elif self._state == FINISHED: - return self.__get_result() - - self._condition.wait(timeout) - - if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: - raise CancelledError() - elif self._state == FINISHED: - return self.__get_result() - else: - raise TimeoutError() - - def exception_info(self, timeout=None): - """Return a tuple of (exception, traceback) raised by the call that the - future represents. - - Args: - timeout: The number of seconds to wait for the exception if the - future isn't done. If None, then there is no limit on the wait - time. - - Returns: - The exception raised by the call that the future represents or None - if the call completed without raising. - - Raises: - CancelledError: If the future was cancelled. - TimeoutError: If the future didn't finish executing before the given - timeout. - """ - with self._condition: - if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: - raise CancelledError() - elif self._state == FINISHED: - return self._exception, self._traceback - - self._condition.wait(timeout) - - if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: - raise CancelledError() - elif self._state == FINISHED: - return self._exception, self._traceback - else: - raise TimeoutError() - - def exception(self, timeout=None): - """Return the exception raised by the call that the future represents. - - Args: - timeout: The number of seconds to wait for the exception if the - future isn't done. If None, then there is no limit on the wait - time. - - Returns: - The exception raised by the call that the future represents or None - if the call completed without raising. - - Raises: - CancelledError: If the future was cancelled. 
- TimeoutError: If the future didn't finish executing before the given - timeout. - """ - return self.exception_info(timeout)[0] - - # The following methods should only be used by Executors and in tests. - def set_running_or_notify_cancel(self): - """Mark the future as running or process any cancel notifications. - - Should only be used by Executor implementations and unit tests. - - If the future has been cancelled (cancel() was called and returned - True) then any threads waiting on the future completing (though calls - to as_completed() or wait()) are notified and False is returned. - - If the future was not cancelled then it is put in the running state - (future calls to running() will return True) and True is returned. - - This method should be called by Executor implementations before - executing the work associated with this future. If this method returns - False then the work should not be executed. - - Returns: - False if the Future was cancelled, True otherwise. - - Raises: - RuntimeError: if this method was already called or if set_result() - or set_exception() was called. - """ - with self._condition: - if self._state == CANCELLED: - self._state = CANCELLED_AND_NOTIFIED - for waiter in self._waiters: - waiter.add_cancelled(self) - # self._condition.notify_all() is not necessary because - # self.cancel() triggers a notification. - return False - elif self._state == PENDING: - self._state = RUNNING - return True - else: - LOGGER.critical('Future %s in unexpected state: %s', - id(self), - self._state) - raise RuntimeError('Future in unexpected state') - - def set_result(self, result): - """Sets the return value of work associated with the future. - - Should only be used by Executor implementations and unit tests. 
- """ - with self._condition: - self._result = result - self._state = FINISHED - for waiter in self._waiters: - waiter.add_result(self) - self._condition.notify_all() - self._invoke_callbacks() - - def set_exception_info(self, exception, traceback): - """Sets the result of the future as being the given exception - and traceback. - - Should only be used by Executor implementations and unit tests. - """ - with self._condition: - self._exception = exception - self._traceback = traceback - self._state = FINISHED - for waiter in self._waiters: - waiter.add_exception(self) - self._condition.notify_all() - self._invoke_callbacks() - - def set_exception(self, exception): - """Sets the result of the future as being the given exception. - - Should only be used by Executor implementations and unit tests. - """ - self.set_exception_info(exception, None) - -class Executor(object): - """This is an abstract base class for concrete asynchronous executors.""" - - def submit(self, fn, *args, **kwargs): - """Submits a callable to be executed with the given arguments. - - Schedules the callable to be executed as fn(*args, **kwargs) and returns - a Future instance representing the execution of the callable. - - Returns: - A Future representing the given call. - """ - raise NotImplementedError() - - def map(self, fn, *iterables, **kwargs): - """Returns a iterator equivalent to map(fn, iter). - - Args: - fn: A callable that will take as many arguments as there are - passed iterables. - timeout: The maximum number of seconds to wait. If None, then there - is no limit on the wait time. - - Returns: - An iterator equivalent to: map(func, *iterables) but the calls may - be evaluated out-of-order. - - Raises: - TimeoutError: If the entire result iterator could not be generated - before the given timeout. - Exception: If fn(*args) raises for any values. 
- """ - timeout = kwargs.get('timeout') - if timeout is not None: - end_time = timeout + time.time() - - fs = [self.submit(fn, *args) for args in itertools.izip(*iterables)] - - # Yield must be hidden in closure so that the futures are submitted - # before the first iterator value is required. - def result_iterator(): - try: - for future in fs: - if timeout is None: - yield future.result() - else: - yield future.result(end_time - time.time()) - finally: - for future in fs: - future.cancel() - return result_iterator() - - def shutdown(self, wait=True): - """Clean-up the resources associated with the Executor. - - It is safe to call this method several times. Otherwise, no other - methods can be called after this one. - - Args: - wait: If True then shutdown will not return until all running - futures have finished executing and the resources used by the - executor have been reclaimed. - """ - pass - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.shutdown(wait=True) - return False diff --git a/splunk_eventgen/lib/concurrent/futures/process.py b/splunk_eventgen/lib/concurrent/futures/process.py deleted file mode 100644 index 72528410..00000000 --- a/splunk_eventgen/lib/concurrent/futures/process.py +++ /dev/null @@ -1,359 +0,0 @@ -# Copyright 2009 Brian Quinlan. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Implements ProcessPoolExecutor. - -The follow diagram and text describe the data-flow through the system: - -|======================= In-process =====================|== Out-of-process ==| - -+----------+ +----------+ +--------+ +-----------+ +---------+ -| | => | Work Ids | => | | => | Call Q | => | | -| | +----------+ | | +-----------+ | | -| | | ... | | | | ... | | | -| | | 6 | | | | 5, call() | | | -| | | 7 | | | | ... | | | -| Process | | ... 
| | Local | +-----------+ | Process | -| Pool | +----------+ | Worker | | #1..n | -| Executor | | Thread | | | -| | +----------- + | | +-----------+ | | -| | <=> | Work Items | <=> | | <= | Result Q | <= | | -| | +------------+ | | +-----------+ | | -| | | 6: call() | | | | ... | | | -| | | future | | | | 4, result | | | -| | | ... | | | | 3, except | | | -+----------+ +------------+ +--------+ +-----------+ +---------+ - -Executor.submit() called: -- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict -- adds the id of the _WorkItem to the "Work Ids" queue - -Local worker thread: -- reads work ids from the "Work Ids" queue and looks up the corresponding - WorkItem from the "Work Items" dict: if the work item has been cancelled then - it is simply removed from the dict, otherwise it is repackaged as a - _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q" - until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because - calls placed in the "Call Q" can no longer be cancelled with Future.cancel(). -- reads _ResultItems from "Result Q", updates the future stored in the - "Work Items" dict and deletes the dict entry - -Process #1..n: -- reads _CallItems from "Call Q", executes the calls, and puts the resulting - _ResultItems in "Request Q" -""" - -import atexit -from concurrent.futures import _base -import Queue as queue -import multiprocessing -import threading -import weakref -import sys - -__author__ = 'Brian Quinlan (brian@sweetapp.com)' - -# Workers are created as daemon threads and processes. This is done to allow the -# interpreter to exit when there are still idle processes in a -# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However, -# allowing workers to die with the interpreter has two undesirable properties: -# - The workers would still be running during interpretor shutdown, -# meaning that they would fail in unpredictable ways. 
-# - The workers could be killed while evaluating a work item, which could -# be bad if the callable being evaluated has external side-effects e.g. -# writing to a file. -# -# To work around this problem, an exit handler is installed which tells the -# workers to exit when their work queues are empty and then waits until the -# threads/processes finish. - -_threads_queues = weakref.WeakKeyDictionary() -_shutdown = False - -def _python_exit(): - global _shutdown - _shutdown = True - items = list(_threads_queues.items()) if _threads_queues else () - for t, q in items: - q.put(None) - for t, q in items: - t.join(sys.maxint) - -# Controls how many more calls than processes will be queued in the call queue. -# A smaller number will mean that processes spend more time idle waiting for -# work while a larger number will make Future.cancel() succeed less frequently -# (Futures in the call queue cannot be cancelled). -EXTRA_QUEUED_CALLS = 1 - -class _WorkItem(object): - def __init__(self, future, fn, args, kwargs): - self.future = future - self.fn = fn - self.args = args - self.kwargs = kwargs - -class _ResultItem(object): - def __init__(self, work_id, exception=None, result=None): - self.work_id = work_id - self.exception = exception - self.result = result - -class _CallItem(object): - def __init__(self, work_id, fn, args, kwargs): - self.work_id = work_id - self.fn = fn - self.args = args - self.kwargs = kwargs - -def _process_worker(call_queue, result_queue): - """Evaluates calls from call_queue and places the results in result_queue. - - This worker is run in a separate process. - - Args: - call_queue: A multiprocessing.Queue of _CallItems that will be read and - evaluated by the worker. - result_queue: A multiprocessing.Queue of _ResultItems that will written - to by the worker. - shutdown: A multiprocessing.Event that will be set as a signal to the - worker that it should exit when call_queue is empty. 
- """ - while True: - call_item = call_queue.get(block=True) - if call_item is None: - # Wake up queue management thread - result_queue.put(None) - return - try: - r = call_item.fn(*call_item.args, **call_item.kwargs) - except BaseException: - e = sys.exc_info()[1] - result_queue.put(_ResultItem(call_item.work_id, - exception=e)) - else: - result_queue.put(_ResultItem(call_item.work_id, - result=r)) - -def _add_call_item_to_queue(pending_work_items, - work_ids, - call_queue): - """Fills call_queue with _WorkItems from pending_work_items. - - This function never blocks. - - Args: - pending_work_items: A dict mapping work ids to _WorkItems e.g. - {5: <_WorkItem...>, 6: <_WorkItem...>, ...} - work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids - are consumed and the corresponding _WorkItems from - pending_work_items are transformed into _CallItems and put in - call_queue. - call_queue: A multiprocessing.Queue that will be filled with _CallItems - derived from _WorkItems. - """ - while True: - if call_queue.full(): - return - try: - work_id = work_ids.get(block=False) - except queue.Empty: - return - else: - work_item = pending_work_items[work_id] - - if work_item.future.set_running_or_notify_cancel(): - call_queue.put(_CallItem(work_id, - work_item.fn, - work_item.args, - work_item.kwargs), - block=True) - else: - del pending_work_items[work_id] - continue - -def _queue_management_worker(executor_reference, - processes, - pending_work_items, - work_ids_queue, - call_queue, - result_queue): - """Manages the communication between this process and the worker processes. - - This function is run in a local thread. - - Args: - executor_reference: A weakref.ref to the ProcessPoolExecutor that owns - this thread. Used to determine if the ProcessPoolExecutor has been - garbage collected and that this function can exit. - process: A list of the multiprocessing.Process instances used as - workers. - pending_work_items: A dict mapping work ids to _WorkItems e.g. 
- {5: <_WorkItem...>, 6: <_WorkItem...>, ...} - work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]). - call_queue: A multiprocessing.Queue that will be filled with _CallItems - derived from _WorkItems for processing by the process workers. - result_queue: A multiprocessing.Queue of _ResultItems generated by the - process workers. - """ - nb_shutdown_processes = [0] - def shutdown_one_process(): - """Tell a worker to terminate, which will in turn wake us again""" - call_queue.put(None) - nb_shutdown_processes[0] += 1 - while True: - _add_call_item_to_queue(pending_work_items, - work_ids_queue, - call_queue) - - result_item = result_queue.get(block=True) - if result_item is not None: - work_item = pending_work_items[result_item.work_id] - del pending_work_items[result_item.work_id] - - if result_item.exception: - work_item.future.set_exception(result_item.exception) - else: - work_item.future.set_result(result_item.result) - # Delete references to object. See issue16284 - del work_item - # Check whether we should start shutting down. - executor = executor_reference() - # No more work items can be added if: - # - The interpreter is shutting down OR - # - The executor that owns this worker has been collected OR - # - The executor that owns this worker has been shutdown. - if _shutdown or executor is None or executor._shutdown_thread: - # Since no new work items can be added, it is safe to shutdown - # this thread if there are no pending work items. - if not pending_work_items: - while nb_shutdown_processes[0] < len(processes): - shutdown_one_process() - # If .join() is not called on the created processes then - # some multiprocessing.Queue methods may deadlock on Mac OS - # X. 
- for p in processes: - p.join() - call_queue.close() - return - del executor - -_system_limits_checked = False -_system_limited = None -def _check_system_limits(): - global _system_limits_checked, _system_limited - if _system_limits_checked: - if _system_limited: - raise NotImplementedError(_system_limited) - _system_limits_checked = True - try: - import os - nsems_max = os.sysconf("SC_SEM_NSEMS_MAX") - except (AttributeError, ValueError): - # sysconf not available or setting not available - return - if nsems_max == -1: - # indetermine limit, assume that limit is determined - # by available memory only - return - if nsems_max >= 256: - # minimum number of semaphores available - # according to POSIX - return - _system_limited = "system provides too few semaphores (%d available, 256 necessary)" % nsems_max - raise NotImplementedError(_system_limited) - -class ProcessPoolExecutor(_base.Executor): - def __init__(self, max_workers=None): - """Initializes a new ProcessPoolExecutor instance. - - Args: - max_workers: The maximum number of processes that can be used to - execute the given calls. If None or not given then as many - worker processes will be created as the machine has processors. - """ - _check_system_limits() - - if max_workers is None: - self._max_workers = multiprocessing.cpu_count() - else: - self._max_workers = max_workers - - # Make the call queue slightly larger than the number of processes to - # prevent the worker processes from idling. But don't make it too big - # because futures in the call queue cannot be cancelled. - self._call_queue = multiprocessing.Queue(self._max_workers + - EXTRA_QUEUED_CALLS) - self._result_queue = multiprocessing.Queue() - self._work_ids = queue.Queue() - self._queue_management_thread = None - self._processes = set() - - # Shutdown is a two-step process. 
- self._shutdown_thread = False - self._shutdown_lock = threading.Lock() - self._queue_count = 0 - self._pending_work_items = {} - - def _start_queue_management_thread(self): - # When the executor gets lost, the weakref callback will wake up - # the queue management thread. - def weakref_cb(_, q=self._result_queue): - q.put(None) - if self._queue_management_thread is None: - self._queue_management_thread = threading.Thread( - target=_queue_management_worker, - args=(weakref.ref(self, weakref_cb), - self._processes, - self._pending_work_items, - self._work_ids, - self._call_queue, - self._result_queue)) - self._queue_management_thread.daemon = True - self._queue_management_thread.start() - _threads_queues[self._queue_management_thread] = self._result_queue - - def _adjust_process_count(self): - for _ in range(len(self._processes), self._max_workers): - p = multiprocessing.Process( - target=_process_worker, - args=(self._call_queue, - self._result_queue)) - p.start() - self._processes.add(p) - - def submit(self, fn, *args, **kwargs): - with self._shutdown_lock: - if self._shutdown_thread: - raise RuntimeError('cannot schedule new futures after shutdown') - - f = _base.Future() - w = _WorkItem(f, fn, args, kwargs) - - self._pending_work_items[self._queue_count] = w - self._work_ids.put(self._queue_count) - self._queue_count += 1 - # Wake up queue management thread - self._result_queue.put(None) - - self._start_queue_management_thread() - self._adjust_process_count() - return f - submit.__doc__ = _base.Executor.submit.__doc__ - - def shutdown(self, wait=True): - with self._shutdown_lock: - self._shutdown_thread = True - if self._queue_management_thread: - # Wake up queue management thread - self._result_queue.put(None) - if wait: - self._queue_management_thread.join(sys.maxint) - # To reduce the risk of openning too many files, remove references to - # objects that use file descriptors. 
- self._queue_management_thread = None - self._call_queue = None - self._result_queue = None - self._processes = None - shutdown.__doc__ = _base.Executor.shutdown.__doc__ - -atexit.register(_python_exit) diff --git a/splunk_eventgen/lib/concurrent/futures/thread.py b/splunk_eventgen/lib/concurrent/futures/thread.py deleted file mode 100644 index 85ab4b74..00000000 --- a/splunk_eventgen/lib/concurrent/futures/thread.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright 2009 Brian Quinlan. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Implements ThreadPoolExecutor.""" - -import atexit -from concurrent.futures import _base -import Queue as queue -import threading -import weakref -import sys - -__author__ = 'Brian Quinlan (brian@sweetapp.com)' - -# Workers are created as daemon threads. This is done to allow the interpreter -# to exit when there are still idle threads in a ThreadPoolExecutor's thread -# pool (i.e. shutdown() was not called). However, allowing workers to die with -# the interpreter has two undesirable properties: -# - The workers would still be running during interpretor shutdown, -# meaning that they would fail in unpredictable ways. -# - The workers could be killed while evaluating a work item, which could -# be bad if the callable being evaluated has external side-effects e.g. -# writing to a file. -# -# To work around this problem, an exit handler is installed which tells the -# workers to exit when their work queues are empty and then waits until the -# threads finish. 
- -_threads_queues = weakref.WeakKeyDictionary() -_shutdown = False - -def _python_exit(): - global _shutdown - _shutdown = True - items = list(_threads_queues.items()) if _threads_queues else () - for t, q in items: - q.put(None) - for t, q in items: - t.join(sys.maxint) - -atexit.register(_python_exit) - -class _WorkItem(object): - def __init__(self, future, fn, args, kwargs): - self.future = future - self.fn = fn - self.args = args - self.kwargs = kwargs - - def run(self): - if not self.future.set_running_or_notify_cancel(): - return - - try: - result = self.fn(*self.args, **self.kwargs) - except BaseException: - e, tb = sys.exc_info()[1:] - self.future.set_exception_info(e, tb) - else: - self.future.set_result(result) - -def _worker(executor_reference, work_queue): - try: - while True: - work_item = work_queue.get(block=True) - if work_item is not None: - work_item.run() - # Delete references to object. See issue16284 - del work_item - continue - executor = executor_reference() - # Exit if: - # - The interpreter is shutting down OR - # - The executor that owns the worker has been collected OR - # - The executor that owns the worker has been shutdown. - if _shutdown or executor is None or executor._shutdown: - # Notice other workers - work_queue.put(None) - return - del executor - except BaseException: - _base.LOGGER.critical('Exception in worker', exc_info=True) - -class ThreadPoolExecutor(_base.Executor): - def __init__(self, max_workers): - """Initializes a new ThreadPoolExecutor instance. - - Args: - max_workers: The maximum number of threads that can be used to - execute the given calls. 
- """ - self._max_workers = max_workers - self._work_queue = queue.Queue() - self._threads = set() - self._shutdown = False - self._shutdown_lock = threading.Lock() - - def submit(self, fn, *args, **kwargs): - with self._shutdown_lock: - if self._shutdown: - raise RuntimeError('cannot schedule new futures after shutdown') - - f = _base.Future() - w = _WorkItem(f, fn, args, kwargs) - - self._work_queue.put(w) - self._adjust_thread_count() - return f - submit.__doc__ = _base.Executor.submit.__doc__ - - def _adjust_thread_count(self): - # When the executor gets lost, the weakref callback will wake up - # the worker threads. - def weakref_cb(_, q=self._work_queue): - q.put(None) - # TODO(bquinlan): Should avoid creating new threads if there are more - # idle threads than items in the work queue. - if len(self._threads) < self._max_workers: - t = threading.Thread(target=_worker, - args=(weakref.ref(self, weakref_cb), - self._work_queue)) - t.daemon = True - t.start() - self._threads.add(t) - _threads_queues[t] = self._work_queue - - def shutdown(self, wait=True): - with self._shutdown_lock: - self._shutdown = True - self._work_queue.put(None) - if wait: - for t in self._threads: - t.join(sys.maxint) - shutdown.__doc__ = _base.Executor.shutdown.__doc__ diff --git a/splunk_eventgen/lib/eventgenconfig.py b/splunk_eventgen/lib/eventgenconfig.py index fe0d4eed..3acca7c5 100644 --- a/splunk_eventgen/lib/eventgenconfig.py +++ b/splunk_eventgen/lib/eventgenconfig.py @@ -1,5 +1,3 @@ -from __future__ import division - import datetime import json import logging.handlers @@ -8,13 +6,15 @@ import random import re import types -import urllib -from ConfigParser import ConfigParser +import urllib.request +import urllib.parse +import urllib.error +from configparser import RawConfigParser -from eventgenexceptions import FailedLoadingPlugin, PluginNotLoaded -from eventgensamples import Sample -from eventgentoken import Token -from logging_config import logger +from 
splunk_eventgen.lib.eventgenexceptions import FailedLoadingPlugin, PluginNotLoaded +from splunk_eventgen.lib.eventgensamples import Sample +from splunk_eventgen.lib.eventgentoken import Token +from splunk_eventgen.lib.logging_config import logger # 4/21/14 CS Adding a defined constant whether we're running in standalone mode or not # Standalone mode is when we know we're Splunk embedded but we want to force @@ -69,7 +69,7 @@ class Config(object): # the config files threading = None disabled = None - blacklist = ".*\.part" + blacklist = r".*\.part" __generatorworkers = [] __outputworkers = [] @@ -96,7 +96,7 @@ class Config(object): _validReplacementTypes = [ 'static', 'timestamp', 'replaytimestamp', 'random', 'rated', 'file', 'mvfile', 'seqfile', 'integerid'] validOutputModes = [] - _intSettings = ['interval', 'outputWorkers', 'generatorWorkers', 'maxIntervalsBeforeFlush', 'maxQueueLength'] + _intSettings = ['interval', 'outputWorkers', 'generatorWorkers', 'maxIntervalsBeforeFlush', 'maxQueueLength', "fileMaxBytes"] _floatSettings = ['randomizeCount', 'delay', 'timeMultiple'] _boolSettings = [ 'disabled', 'randomizeEvents', 'bundlelines', 'profiler', 'useOutputQueue', 'autotimestamp', @@ -129,7 +129,7 @@ def __init__(self, configfile=None, sample=None, override_outputter=False, overr self.override_backfill = override_backfill self.override_end = override_end self.verbosity = verbosity - if override_generators >= 0: + if override_generators is not None and override_generators >= 0: self.generatorWorkers = override_generators if override_outputqueue: self.useOutputQueue = False @@ -142,7 +142,7 @@ def __init__(self, configfile=None, sample=None, override_outputter=False, overr # 1/11/14 CS Adding a initial config parsing step (this does this twice now, oh well, just runs once # per execution) so that I can get config before calling parse() - c = ConfigParser() + c = RawConfigParser() c.optionxform = str c.read([os.path.join(self.grandparentdir, 'default', 
'eventgen.conf')]) @@ -219,7 +219,7 @@ def getSplunkUrl(self, s): try: import splunk.auth splunkUrl = splunk.auth.splunk.getLocalServerInfo() - results = re.match('(http|https)://([^:/]+):(\d+).*', splunkUrl) + results = re.match(r'(http|https)://([^:/]+):(\d+).*', splunkUrl) splunkMethod = results.groups()[0] splunkHost = results.groups()[1] splunkPort = results.groups()[2] @@ -269,11 +269,11 @@ def parse(self): stanza_map = {} stanza_list = [] - for stanza in self._confDict.keys(): + for stanza in self._confDict: stanza_list.append(stanza) stanza_map[stanza] = [] - for stanza, settings in self._confDict.iteritems(): + for stanza, settings in self._confDict.items(): for stanza_item in stanza_list: if stanza != stanza_item and re.match(stanza, stanza_item): stanza_map[stanza_item].append(stanza) @@ -310,7 +310,7 @@ def parse(self): last_token_number = int(key[6]) # Apply global tokens to the current stanza - kv_pair_items = settings.items() + kv_pair_items = list(settings.items()) if stanza in stanza_map: for global_stanza in stanza_map[stanza]: i = 0 @@ -341,7 +341,7 @@ def parse(self): else: break - keys = settings.keys() + keys = list(settings.keys()) for k, v in self._confDict[global_stanza].items(): if 'token' not in k and k not in keys: kv_pair_items.append((k, v)) @@ -358,7 +358,7 @@ def parse(self): # Token indices could be out of order, so we must check to # see whether we have enough items in the list to update the token # In general this will keep growing the list by whatever length we need - if (key.find("host.") > -1): + if key.find("host.") > -1: # logger.info("hostToken.{} = {}".format(value[1],oldvalue)) if not isinstance(s.hostToken, Token): s.hostToken = Token(s) @@ -368,7 +368,7 @@ def parse(self): else: if len(s.tokens) <= value[0]: x = (value[0] + 1) - len(s.tokens) - s.tokens.extend([None for num in xrange(0, x)]) + s.tokens.extend([None for num in range(0, x)]) if not isinstance(s.tokens[value[0]], Token): s.tokens[value[0]] = Token(s) # 
logger.info("token[{}].{} = {}".format(value[0],value[1],oldvalue)) @@ -386,7 +386,7 @@ def parse(self): # because they come over multiple lines # Don't error out at this point, just log it and remove the token and move on deleteidx = [] - for i in xrange(0, len(s.tokens)): + for i in range(0, len(s.tokens)): t = s.tokens[i] # If the index doesn't exist at all if t is None: @@ -398,7 +398,7 @@ def parse(self): logger.error("Token at index %s invalid" % i) deleteidx.append(i) newtokens = [] - for i in xrange(0, len(s.tokens)): + for i in range(0, len(s.tokens)): if i not in deleteidx: newtokens.append(s.tokens[i]) s.tokens = newtokens @@ -510,7 +510,7 @@ def parse(self): for token in s.tokens: if token.replacementType == 'integerid': try: - stateFile = open(os.path.join(s.sampleDir, 'state.' + urllib.pathname2url(token.token)), 'rU') + stateFile = open(os.path.join(s.sampleDir, 'state.' + urllib.request.pathname2url(token.token)), 'r') token.replacement = stateFile.read() stateFile.close() # The file doesn't exist, use the default value in the config @@ -684,7 +684,7 @@ def parse(self): if '_time' in s.sampleDict[0]: logger.debug("Found _time field, checking if default timestamp exists") t = Token() - t.token = "\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}" + t.token = r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}" t.replacementType = "timestamp" t.replacement = "%Y-%m-%dT%H:%M:%S.%f" @@ -742,7 +742,7 @@ def _punct(self, string): string = string.replace("'", "\\'") string = string.replace(" ", "_") string = string.replace("\t", "t") - string = re.sub("[^,;\-#\$%&+./:=\?@\\\'|*\n\r\"(){}<>\[\]\^!]", "", string, flags=re.M) + string = re.sub(r"[^,;\-#\$%&+./:=\?@\\\'|*\n\r\"(){}<>\[\]\^!]", "", string, flags=re.M) return string def _validateSetting(self, stanza, key, value): @@ -751,7 +751,7 @@ def _validateSetting(self, stanza, key, value): If we've read a token, which is a complex config, returns a tuple of parsed values.""" logger.debug("Validating setting for '%s' 
with value '%s' in stanza '%s'" % (key, value, stanza)) if key.find('token.') > -1: - results = re.match('token\.(\d+)\.(\w+)', key) + results = re.match(r'token\.(\d+)\.(\w+)', key) if results is not None: groups = results.groups() if groups[1] not in self._validTokenTypes: @@ -765,15 +765,15 @@ def _validateSetting(self, stanza, key, value): (value, groups[0], stanza)) raise ValueError("Could not parse token index '%s' token type '%s' in stanza '%s'" % (groups[0], groups[1], stanza)) - return (int(groups[0]), groups[1]) + return int(groups[0]), groups[1] elif key.find('host.') > -1: - results = re.match('host\.(\w+)', key) + results = re.match(r'host\.(\w+)', key) if results is not None: groups = results.groups() if groups[0] not in self._validHostTokens: logger.error("Could not parse host token type '%s' in stanza '%s'" % (groups[0], stanza)) raise ValueError("Could not parse host token type '%s' in stanza '%s'" % (groups[0], stanza)) - return (groups[0], value) + return groups[0], value elif key in self._validSettings: if key in self._intSettings: try: @@ -868,7 +868,7 @@ def _buildConfDict(self): else: logger.info('Retrieving eventgen configurations with ConfigParser()') # We assume we're in a bin directory and that there are default and local directories - conf = ConfigParser() + conf = RawConfigParser() # Make case sensitive conf.optionxform = str conffiles = [] diff --git a/splunk_eventgen/lib/eventgenoutput.py b/splunk_eventgen/lib/eventgenoutput.py index 502d2968..1290e38b 100644 --- a/splunk_eventgen/lib/eventgenoutput.py +++ b/splunk_eventgen/lib/eventgenoutput.py @@ -1,11 +1,7 @@ -from __future__ import division - import datetime -import logging -import logging.handlers import time -from Queue import Full -from logging_config import logger, metrics_logger +from queue import Full +from splunk_eventgen.lib.logging_config import logger, metrics_logger # TODO: Figure out why we load plugins from here instead of the base plugin class. 
diff --git a/splunk_eventgen/lib/eventgensamples.py b/splunk_eventgen/lib/eventgensamples.py index 9c687d91..3238149b 100644 --- a/splunk_eventgen/lib/eventgensamples.py +++ b/splunk_eventgen/lib/eventgensamples.py @@ -1,17 +1,16 @@ # TODO Move config settings to plugins - -from __future__ import division, with_statement - import csv import datetime import os import pprint import re import sys -import urllib +import urllib.request +import urllib.parse +import urllib.error -from timeparser import timeParser -from logging_config import logger +from splunk_eventgen.lib.timeparser import timeParser +from splunk_eventgen.lib.logging_config import logger class Sample(object): @@ -214,7 +213,7 @@ def saveState(self): """Saves state of all integer IDs of this sample to a file so when we restart we'll pick them up""" for token in self.tokens: if token.replacementType == 'integerid': - stateFile = open(os.path.join(self.sampleDir, 'state.' + urllib.pathname2url(token.token)), 'w') + stateFile = open(os.path.join(self.sampleDir, 'state.' 
+ urllib.request.pathname2url(token.token)), 'w') stateFile.write(token.replacement) stateFile.close() @@ -302,14 +301,6 @@ def latestTime(self): def utcnow(self): return self.now(utcnow=True) - def _openSampleFile(self): - logger.debug("Opening sample '%s' in app '%s'" % (self.name, self.app)) - self._sampleFH = open(self.filePath, 'rU') - - def _closeSampleFile(self): - logger.debug("Closing sample '%s' in app '%s'" % (self.name, self.app)) - self._sampleFH.close() - def loadSample(self): """ Load sample from disk into self._sample.sampleLines and self._sample.sampleDict, using cached copy if possible @@ -317,46 +308,45 @@ def loadSample(self): if self.sampletype == 'raw': # 5/27/12 CS Added caching of the sample file if self.sampleDict is None: - self._openSampleFile() - if self.breaker == self.config.breaker: - logger.debug("Reading raw sample '%s' in app '%s'" % (self.name, self.app)) - self.sampleLines = self._sampleFH.readlines() - # 1/5/14 CS Moving to using only sampleDict and doing the breaking up into events at load time instead - # of on every generation - else: - logger.debug("Non-default breaker '%s' detected for sample '%s' in app '%s'" % - (self.breaker, self.name, self.app)) - - sampleData = self._sampleFH.read() - self.sampleLines = [] - - logger.debug("Filling array for sample '%s' in app '%s'; sampleData=%s, breaker=%s" % - (self.name, self.app, len(sampleData), self.breaker)) - - try: - breakerRE = re.compile(self.breaker, re.M) - except: - logger.error( - "Line breaker '%s' for sample '%s' in app '%s' could not be compiled; using default breaker" - % (self.breaker, self.name, self.app)) - self.breaker = self.config.breaker - - # Loop through data, finding matches of the regular expression and breaking them up into - # "lines". Each match includes the breaker itself. 
- extractpos = 0 - searchpos = 0 - breakerMatch = breakerRE.search(sampleData, searchpos) - while breakerMatch: - logger.debug("Breaker found at: %d, %d" % (breakerMatch.span()[0], breakerMatch.span()[1])) - # Ignore matches at the beginning of the file - if breakerMatch.span()[0] != 0: - self.sampleLines.append(sampleData[extractpos:breakerMatch.span()[0]]) - extractpos = breakerMatch.span()[0] - searchpos = breakerMatch.span()[1] + with open(self.filePath, 'r') as fh: + if self.breaker == self.config.breaker: + logger.debug("Reading raw sample '%s' in app '%s'" % (self.name, self.app)) + self.sampleLines = fh.readlines() + # 1/5/14 CS Moving to using only sampleDict and doing the breaking up into events at load time + # instead of on every generation + else: + logger.debug("Non-default breaker '%s' detected for sample '%s' in app '%s'" % + (self.breaker, self.name, self.app)) + + sampleData = fh.read() + self.sampleLines = [] + + logger.debug("Filling array for sample '%s' in app '%s'; sampleData=%s, breaker=%s" % + (self.name, self.app, len(sampleData), self.breaker)) + + try: + breakerRE = re.compile(self.breaker, re.M) + except: + logger.error( + "Line breaker '%s' for sample '%s' in app '%s' could not be compiled; using default breaker" + % (self.breaker, self.name, self.app)) + self.breaker = self.config.breaker + + # Loop through data, finding matches of the regular expression and breaking them up into + # "lines". Each match includes the breaker itself. 
+ extractpos = 0 + searchpos = 0 breakerMatch = breakerRE.search(sampleData, searchpos) - self.sampleLines.append(sampleData[extractpos:]) + while breakerMatch: + logger.debug("Breaker found at: %d, %d" % (breakerMatch.span()[0], breakerMatch.span()[1])) + # Ignore matches at the beginning of the file + if breakerMatch.span()[0] != 0: + self.sampleLines.append(sampleData[extractpos:breakerMatch.span()[0]]) + extractpos = breakerMatch.span()[0] + searchpos = breakerMatch.span()[1] + breakerMatch = breakerRE.search(sampleData, searchpos) + self.sampleLines.append(sampleData[extractpos:]) - self._closeSampleFile() self.sampleDict = [] for line in self.sampleLines: if line == '\n': @@ -370,36 +360,36 @@ def loadSample(self): % (len(self.sampleLines), len(self.sampleDict))) elif self.sampletype == 'csv': if self.sampleDict is None: - self._openSampleFile() - logger.debug("Reading csv sample '%s' in app '%s'" % (self.name, self.app)) - self.sampleDict = [] - self.sampleLines = [] - # Fix to load large csv files, work with python 2.5 onwards - csv.field_size_limit(sys.maxint) - csvReader = csv.DictReader(self._sampleFH) - for line in csvReader: - if '_raw' in line: - # Use conf-defined values for these params instead of sample-defined ones - current_line_keys = line.keys() - if "host" not in current_line_keys: - line["host"] = self.host - if "hostRegex" not in current_line_keys: - line["hostRegex"] = self.hostRegex - if "source" not in current_line_keys: - line["source"] = self.source - if "sourcetype" not in current_line_keys: - line["sourcetype"] = self.sourcetype - if "index" not in current_line_keys: - line["index"] = self.index - self.sampleDict.append(line) - self.sampleLines.append(line['_raw']) - else: - logger.error("Missing _raw in line '%s'" % pprint.pformat(line)) - self._closeSampleFile() + with open(self.filePath, 'r') as fh: + logger.debug("Reading csv sample '%s' in app '%s'" % (self.name, self.app)) + self.sampleDict = [] + self.sampleLines = [] + # Fix 
to load large csv files, work with python 2.5 onwards + csv.field_size_limit(sys.maxsize) + csvReader = csv.DictReader(fh) + for line in csvReader: + if '_raw' in line: + # Use conf-defined values for these params instead of sample-defined ones + current_line_keys = list(line.keys()) + if "host" not in current_line_keys: + line["host"] = self.host + if "hostRegex" not in current_line_keys: + line["hostRegex"] = self.hostRegex + if "source" not in current_line_keys: + line["source"] = self.source + if "sourcetype" not in current_line_keys: + line["sourcetype"] = self.sourcetype + if "index" not in current_line_keys: + line["index"] = self.index + self.sampleDict.append(line) + self.sampleLines.append(line['_raw']) + else: + logger.error("Missing _raw in line '%s'" % pprint.pformat(line)) + logger.debug("Finished creating sampleDict & sampleLines for sample '%s'. Len sampleDict: %d" % (self.name, len(self.sampleDict))) - for i in xrange(0, len(self.sampleDict)): + for i in range(0, len(self.sampleDict)): if len(self.sampleDict[i]['_raw']) < 1 or self.sampleDict[i]['_raw'][-1] != '\n': self.sampleDict[i]['_raw'] += '\n' if self.extendIndexes: @@ -420,10 +410,7 @@ def loadSample(self): self.extendIndexes = None def get_loaded_sample(self): - if self.sampletype != 'csv' and os.path.getsize(self.filePath) > 10000000: - self._openSampleFile() - return self._sampleFH - elif self.sampletype == 'csv': + if self.sampletype == 'csv': self.loadSample() return self.sampleDict else: diff --git a/splunk_eventgen/lib/eventgentimer.py b/splunk_eventgen/lib/eventgentimer.py index 5e3c8405..90af68b4 100644 --- a/splunk_eventgen/lib/eventgentimer.py +++ b/splunk_eventgen/lib/eventgentimer.py @@ -1,9 +1,9 @@ import time import copy -from Queue import Full +from queue import Full -from timeparser import timeParserTimeMath -from logging_config import logger +from splunk_eventgen.lib.timeparser import timeParserTimeMath +from splunk_eventgen.lib.logging_config import logger class 
Timer(object): diff --git a/splunk_eventgen/lib/eventgentoken.py b/splunk_eventgen/lib/eventgentoken.py index e393e232..e3351e05 100644 --- a/splunk_eventgen/lib/eventgentoken.py +++ b/splunk_eventgen/lib/eventgentoken.py @@ -1,7 +1,4 @@ # TODO: Handle timestamp generation for modinput and set sample.timestamp properly for timestamp replacement - -from __future__ import division, with_statement - import datetime import json import os @@ -9,11 +6,13 @@ import random import re import time -import urllib +import urllib.request +import urllib.parse +import urllib.error import uuid -from timeparser import timeDelta2secs -from logging_config import logger +from splunk_eventgen.lib.timeparser import timeDelta2secs +from splunk_eventgen.lib.logging_config import logger class Token(object): @@ -170,35 +169,35 @@ def _getReplacement(self, old=None, earliestTime=None, latestTime=None, s=None, if self._integerMatch is not None: integerMatch = self._integerMatch else: - integerRE = re.compile('integer\[([-]?\d+):([-]?\d+)\]', re.I) + integerRE = re.compile(r'integer\[([-]?\d+):([-]?\d+)\]', re.I) integerMatch = integerRE.match(self.replacement) self._integerMatch = integerMatch if self._floatMatch is not None: floatMatch = self._floatMatch else: - floatRE = re.compile('float\[(-?\d+|-?\d+\.(\d+)):(-?\d+|-?\d+\.(\d+))\]', re.I) + floatRE = re.compile(r'float\[(-?\d+|-?\d+\.(\d+)):(-?\d+|-?\d+\.(\d+))\]', re.I) floatMatch = floatRE.match(self.replacement) self._floatMatch = floatMatch if self._stringMatch is not None: stringMatch = self._stringMatch else: - stringRE = re.compile('string\((\d+)\)', re.I) + stringRE = re.compile(r'string\((\d+)\)', re.I) stringMatch = stringRE.match(self.replacement) self._stringMatch = stringMatch if self._hexMatch is not None: hexMatch = self._hexMatch else: - hexRE = re.compile('hex\((\d+)\)', re.I) + hexRE = re.compile(r'hex\((\d+)\)', re.I) hexMatch = hexRE.match(self.replacement) self._hexMatch = hexMatch if self._listMatch is not None: 
listMatch = self._listMatch else: - listRE = re.compile('list(\[[^\]]+\])', re.I) + listRE = re.compile(r'list(\[[^\]]+\])', re.I) listMatch = listRE.match(self.replacement) self._listMatch = listMatch @@ -327,7 +326,7 @@ def _getReplacement(self, old=None, earliestTime=None, latestTime=None, s=None, # Generate a random ASCII between dec 33->126 replacement += chr(random.randint(33, 126)) # Practice safe strings - replacement = re.sub('%[0-9a-fA-F]+', '', urllib.quote(replacement)) + replacement = re.sub('%[0-9a-fA-F]+', '', urllib.parse.quote(replacement)) return replacement else: diff --git a/splunk_eventgen/lib/generatorplugin.py b/splunk_eventgen/lib/generatorplugin.py index 6d326d30..1c190079 100644 --- a/splunk_eventgen/lib/generatorplugin.py +++ b/splunk_eventgen/lib/generatorplugin.py @@ -1,19 +1,19 @@ -from __future__ import division - import datetime import pprint import time import random -import urllib +import urllib.request +import urllib.parse +import urllib.error from xml.dom import minidom from xml.parsers.expat import ExpatError import httplib2 -from eventgenoutput import Output -from eventgentimestamp import EventgenTimestamp -from timeparser import timeParser -from logging_config import logger +from splunk_eventgen.lib.eventgenoutput import Output +from splunk_eventgen.lib.eventgentimestamp import EventgenTimestamp +from splunk_eventgen.lib.timeparser import timeParser +from splunk_eventgen.lib.logging_config import logger class GeneratorPlugin(object): @@ -125,7 +125,7 @@ def setupBackfill(self): results = httplib2.Http(disable_ssl_certificate_validation=True).request( s.backfillSearchUrl + '/services/search/jobs', 'POST', headers={ - 'Authorization': 'Splunk %s' % s.sessionKey}, body=urllib.urlencode({ + 'Authorization': 'Splunk %s' % s.sessionKey}, body=urllib.parse.urlencode({ 'search': s.backfillSearch, 'earliest_time': s.backfill, 'exec_mode': 'oneshot'}))[1] try: temptime = 
minidom.parseString(results).getElementsByTagName('text')[0].childNodes[0].nodeValue diff --git a/splunk_eventgen/lib/logutils_src/LICENSE.txt b/splunk_eventgen/lib/logutils_src/LICENSE.txt deleted file mode 100644 index b633581f..00000000 --- a/splunk_eventgen/lib/logutils_src/LICENSE.txt +++ /dev/null @@ -1,26 +0,0 @@ -Copyright (c) 2008-2017 by Vinay Sajip. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - * The name(s) of the copyright holder(s) may not be used to endorse or - promote products derived from this software without specific prior - written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) "AS IS" AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE -OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- diff --git a/splunk_eventgen/lib/logutils_src/NEWS.txt b/splunk_eventgen/lib/logutils_src/NEWS.txt deleted file mode 100644 index ef7d7e38..00000000 --- a/splunk_eventgen/lib/logutils_src/NEWS.txt +++ /dev/null @@ -1,56 +0,0 @@ -:orphan: - -.. _whats-new: - -What's New in logutils -====================== - -Version 0.3.4 -------------- - -- Return non-zero error code from "setup.py test" when a test fails. -- Make the dictConfig tests work with both Python 2.x and 3.x. - -Version 0.3.3 -------------- - -- Added encoding support to ColorizingStreamHandler. - -Version 0.3.2 -------------- - -- Improvements in QueueListener implementation. -- Added redis module with RedisQueueHandler and - RedisQueueListener. -- Added unit test for a handler in a module - where absolute imports are used. - -Version 0.3.1 -------------- - -- Improvements in setup.py and documentation. - -Version 0.3 ------------ - -- Added caches for BraceMessage/DollarMessage. -- Added ColorizingStreamHandler. - -Version 0.2 ------------ - -- Updated docstrings for improved documentation. -- Added hasHanders() function. -- Changed LoggerAdapter.hasHandlers() to use logutils.hasHandlers(). -- Documentation improvements. -- NullHandler moved to logutils package (from queue package). -- Formatter added to logutils package. Adds support for {}- and $-formatting - in format strings, as well as %-formatting. -- BraceMessage and DollarMessage classes added to facilitate {}- and $- - formatting in logging calls (as opposed to Formatter formats). -- Added some more unit tests. - -Version 0.1 ------------ - -First public release. 
diff --git a/splunk_eventgen/lib/logutils_src/PKG-INFO b/splunk_eventgen/lib/logutils_src/PKG-INFO deleted file mode 100644 index 794a048a..00000000 --- a/splunk_eventgen/lib/logutils_src/PKG-INFO +++ /dev/null @@ -1,36 +0,0 @@ -Metadata-Version: 1.1 -Name: logutils -Version: 0.3.4.1 -Summary: Logging utilities -Home-page: http://code.google.com/p/logutils/ -Author: Vinay Sajip -Author-email: vinay_sajip@red-dove.com -License: Copyright (C) 2010-2017 by Vinay Sajip. All Rights Reserved. See LICENSE.txt for license. -Description: The logutils package provides a set of handlers for the Python standard - library's logging package. - - Some of these handlers are out-of-scope for the standard library, and - so they are packaged here. Others are updated versions which have - appeared in recent Python releases, but are usable with older versions - of Python and so are packaged here. - - The source code repository is at: - - https://bitbucket.org/vinay.sajip/logutils/ - - Documentation is available at: - - https://logutils.readthedocs.io/ - - https://pythonhosted.org/logutils/ - -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Environment :: Console -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: BSD License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 3 -Classifier: Topic :: Software Development diff --git a/splunk_eventgen/lib/logutils_src/README.rst b/splunk_eventgen/lib/logutils_src/README.rst deleted file mode 100644 index 0070fea2..00000000 --- a/splunk_eventgen/lib/logutils_src/README.rst +++ /dev/null @@ -1,41 +0,0 @@ -.. image:: https://travis-ci.org/vsajip/logutils.svg - :target: https://travis-ci.org/vsajip/logutils - -.. 
image:: https://coveralls.io/repos/vsajip/logutils/badge.svg - :target: https://coveralls.io/github/vsajip/logutils - -logutils 0.3.4 -============== -The logutils package provides a set of handlers for the Python standard -library's logging package. - -Some of these handlers are out-of-scope for the standard library, and -so they are packaged here. Others are updated versions which have -appeared in recent Python releases, but are usable with older versions -of Python and so are packaged here. - -Requirements & Installation ---------------------------- -The logutils package requires Python 2.5 or greater, and can be -installed with the standard Python installation procedure: - - python setup.py install - -There is a set of unit tests which you can invoke with - - python setup.py test - -before running the installation. - -Availability & Documentation ----------------------------- - -The source code repository is at: - -https://bitbucket.org/vinay.sajip/logutils/ - -Documentation is available at: - -https://logutils.readthedocs.io/ - -https://pythonhosted.org/logutils/ diff --git a/splunk_eventgen/lib/logutils_src/__init__.py b/splunk_eventgen/lib/logutils_src/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/splunk_eventgen/lib/logutils_src/doc/Makefile b/splunk_eventgen/lib/logutils_src/doc/Makefile deleted file mode 100644 index dbc6dec2..00000000 --- a/splunk_eventgen/lib/logutils_src/doc/Makefile +++ /dev/null @@ -1,75 +0,0 @@ -# Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = - -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
- -.PHONY: help clean html web pickle htmlhelp latex changes linkcheck - -help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " changes to make an overview over all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - -clean: - -rm -rf _build/* - -html: - mkdir -p _build/html _build/doctrees - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) _build/html - @echo - @echo "Build finished. The HTML pages are in _build/html." - -pickle: - mkdir -p _build/pickle _build/doctrees - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) _build/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -web: pickle - -json: - mkdir -p _build/json _build/doctrees - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) _build/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - mkdir -p _build/htmlhelp _build/doctrees - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) _build/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in _build/htmlhelp." - -latex: - mkdir -p _build/latex _build/doctrees - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) _build/latex - @echo - @echo "Build finished; the LaTeX files are in _build/latex." - @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ - "run these through (pdf)latex." - -changes: - mkdir -p _build/changes _build/doctrees - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) _build/changes - @echo - @echo "The overview file is in _build/changes." 
- -linkcheck: - mkdir -p _build/linkcheck _build/doctrees - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) _build/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in _build/linkcheck/output.txt." diff --git a/splunk_eventgen/lib/logutils_src/doc/adapter.rst b/splunk_eventgen/lib/logutils_src/doc/adapter.rst deleted file mode 100644 index a277de41..00000000 --- a/splunk_eventgen/lib/logutils_src/doc/adapter.rst +++ /dev/null @@ -1,16 +0,0 @@ -Working with Logger adapters -============================ - -**N.B.** This is part of the standard library since Python 2.6 / 3.1, so the -version here is for use with earlier Python versions. - -The class was enhanced for Python 3.2, so you may wish to use this version -with earlier Python versions. - -However, note that the :class:`~logutils.adapter.LoggerAdapter` class will **not** -work with Python 2.4 or earlier, as it uses the `extra` keyword argument which -was added in later Python versions. - -.. automodule:: logutils.adapter - :members: - diff --git a/splunk_eventgen/lib/logutils_src/doc/colorize.rst b/splunk_eventgen/lib/logutils_src/doc/colorize.rst deleted file mode 100644 index f18a6568..00000000 --- a/splunk_eventgen/lib/logutils_src/doc/colorize.rst +++ /dev/null @@ -1,11 +0,0 @@ -Colorizing Console Streams -========================== - -``ColorizingStreamHandler`` is a handler which allows colorizing of console -streams, described here_ in more detail. - -.. _here: http://plumberjack.blogspot.com/2010/12/colorizing-logging-output-in-terminals.html - -.. 
automodule:: logutils.colorize - :members: - diff --git a/splunk_eventgen/lib/logutils_src/doc/conf.py b/splunk_eventgen/lib/logutils_src/doc/conf.py deleted file mode 100644 index 17a8499c..00000000 --- a/splunk_eventgen/lib/logutils_src/doc/conf.py +++ /dev/null @@ -1,194 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Logutils documentation build configuration file, created by -# sphinx-quickstart on Fri Oct 1 15:54:52 2010. -# -# This file is execfile()d with the current directory set to its containing dir. -# -# The contents of this file are pickled, so don't put values in the namespace -# that aren't pickleable (module imports are okay, they're removed automatically). -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import os -import sys - -# If your extensions (or modules documented by autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.append(os.path.abspath('..')) - -# General configuration -# --------------------- - -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx'] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -# source_encoding = 'utf-8' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. 
-project = u'Logutils' -copyright = u'2010-2017, Vinay Sajip' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = '0.3' -# The full version, including alpha/beta/rc tags. -release = '0.3.4' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of documents that shouldn't be included in the build. -# unused_docs = [] - -# List of directories, relative to source directory, that shouldn't be searched -# for source files. -exclude_trees = ['_build'] - -# The reST default role (used for this markup: `text`) to use for all documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# Options for HTML output -# ----------------------- - -# The style sheet to use for HTML and HTML Help pages. A file of that name -# must exist either in Sphinx' static/ path, or in one of the custom paths -# given in html_static_path. -# html_style = 'default.css' - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. 
-# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -# html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_use_modindex = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, the reST sources are included in the HTML build as _sources/. -# html_copy_source = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = '' - -html_theme = os.environ.get('DOCS_THEME', 'alabaster') -html_theme_path = ['themes'] - -# Output file base name for HTML help builder. 
-htmlhelp_basename = 'Logutilsdoc' - -# Options for LaTeX output -# ------------------------ - -# The paper size ('letter' or 'a4'). -# latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -# latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, document class [howto/manual]). -latex_documents = [ - ('index', 'Logutils.tex', ur'Logutils Documentation', ur'Vinay Sajip', 'manual'), ] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# Additional stuff for the LaTeX preamble. -# latex_preamble = '' - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_use_modindex = True - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = { - 'http://docs.python.org/dev': None, } diff --git a/splunk_eventgen/lib/logutils_src/doc/dictconfig.rst b/splunk_eventgen/lib/logutils_src/doc/dictconfig.rst deleted file mode 100644 index 575e3707..00000000 --- a/splunk_eventgen/lib/logutils_src/doc/dictconfig.rst +++ /dev/null @@ -1,15 +0,0 @@ -Dictionary-based Configuration -============================== - -This module implements dictionary-based configuration according to PEP 391. - -**N.B.** This is part of the standard library since Python 2.7 / 3.2, so the -version here is for use with earlier Python versions. - -.. automodule:: logutils.dictconfig - -.. autoclass:: logutils.dictconfig.DictConfigurator - :members: configure - -.. 
autofunction:: dictConfig - diff --git a/splunk_eventgen/lib/logutils_src/doc/http.rst b/splunk_eventgen/lib/logutils_src/doc/http.rst deleted file mode 100644 index 621292db..00000000 --- a/splunk_eventgen/lib/logutils_src/doc/http.rst +++ /dev/null @@ -1,11 +0,0 @@ -Working with web sites -====================== - -**N.B.** The :class:`~logutils.http.HTTPHandler` class has been present in the -:mod:`logging` package since the first release, but was enhanced for Python -3.2 to add options for secure connections and user credentials. You may wish -to use this version with earlier Python releases. - -.. automodule:: logutils.http - :members: - diff --git a/splunk_eventgen/lib/logutils_src/doc/index.rst b/splunk_eventgen/lib/logutils_src/doc/index.rst deleted file mode 100644 index 1fc321b3..00000000 --- a/splunk_eventgen/lib/logutils_src/doc/index.rst +++ /dev/null @@ -1,33 +0,0 @@ -.. Logutils documentation master file, created by sphinx-quickstart on Fri Oct 1 15:54:52 2010. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Logutils documentation -====================== - -.. automodule:: logutils - - For recent changes, see :ref:`whats-new`. - -There are a number of subcomponents to this package, relating to particular -tasks you may want to perform: - -.. toctree:: - :maxdepth: 2 - - libraries - queue - redis - testing - dictconfig - adapter - http - colorize - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` - diff --git a/splunk_eventgen/lib/logutils_src/doc/libraries.rst b/splunk_eventgen/lib/logutils_src/doc/libraries.rst deleted file mode 100644 index 611a4666..00000000 --- a/splunk_eventgen/lib/logutils_src/doc/libraries.rst +++ /dev/null @@ -1,25 +0,0 @@ -Configuring Libraries -===================== - -When developing libraries, you'll probably need to use the -:class:`~logutils.NullHandler` class. 
- -**N.B.** This is part of the standard library since Python 2.7 / 3.1, so the -version here is for use with earlier Python versions. - -Typical usage:: - - import logging - try: - from logging import NullHandler - except ImportError: - from logutils import NullHandler - - # use this in all your library's subpackages/submodules - logger = logging.getLogger(__name__) - - # use this just in your library's top-level package - logger.addHandler(NullHandler()) - -.. autoclass:: logutils.NullHandler - :members: diff --git a/splunk_eventgen/lib/logutils_src/doc/queue.rst b/splunk_eventgen/lib/logutils_src/doc/queue.rst deleted file mode 100644 index 984631f1..00000000 --- a/splunk_eventgen/lib/logutils_src/doc/queue.rst +++ /dev/null @@ -1,6 +0,0 @@ -Working with queues -=================== - -.. automodule:: logutils.queue - :members: - diff --git a/splunk_eventgen/lib/logutils_src/doc/redis.rst b/splunk_eventgen/lib/logutils_src/doc/redis.rst deleted file mode 100644 index 8293c306..00000000 --- a/splunk_eventgen/lib/logutils_src/doc/redis.rst +++ /dev/null @@ -1,11 +0,0 @@ -Working with Redis queues -========================= - -:class:`~logutils.queue.QueueHandler` and :class:`~logutils.queue.QueueListener` classes are provided to facilitate interfacing with Redis. - -.. autoclass:: logutils.redis.RedisQueueHandler - :members: - -.. autoclass:: logutils.redis.RedisQueueListener - :members: - diff --git a/splunk_eventgen/lib/logutils_src/doc/testing.rst b/splunk_eventgen/lib/logutils_src/doc/testing.rst deleted file mode 100644 index 1f959cac..00000000 --- a/splunk_eventgen/lib/logutils_src/doc/testing.rst +++ /dev/null @@ -1,65 +0,0 @@ -Unit testing -============ - -When developing unit tests, you may find the -:class:`~logutils.testing.TestHandler` and :class:`~logutils.testing.Matcher` -classes useful. 
- -Typical usage:: - - import logging - from logutils.testing import TestHandler, Matcher - import unittest - - class LoggingTest(unittest.TestCase): - def setUp(self): - self.handler = h = TestHandler(Matcher()) - self.logger = l = logging.getLogger() - l.addHandler(h) - - def tearDown(self): - self.logger.removeHandler(self.handler) - self.handler.close() - - def test_simple(self): - "Simple test of logging test harness." - # Just as a demo, let's log some messages. - # Only one should show up in the log. - self.logger.debug("This won't show up.") - self.logger.info("Neither will this.") - self.logger.warning("But this will.") - h = self.handler - self.assertTrue(h.matches(levelno=logging.WARNING)) - self.assertFalse(h.matches(levelno=logging.DEBUG)) - self.assertFalse(h.matches(levelno=logging.INFO)) - - def test_partial(self): - "Test of partial matching in logging test harness." - # Just as a demo, let's log some messages. - # Only one should show up in the log. - self.logger.debug("This won't show up.") - self.logger.info("Neither will this.") - self.logger.warning("But this will.") - h = self.handler - self.assertTrue(h.matches(msg="ut th")) # from "But this will" - self.assertTrue(h.matches(message="ut th")) # from "But this will" - self.assertFalse(h.matches(message="either")) - self.assertFalse(h.matches(message="won't")) - - def test_multiple(self): - "Test of matching multiple values in logging test harness." - # Just as a demo, let's log some messages. - # Only one should show up in the log. - self.logger.debug("This won't show up.") - self.logger.info("Neither will this.") - self.logger.warning("But this will.") - self.logger.error("And so will this.") - h = self.handler - self.assertTrue(h.matches(levelno=logging.WARNING, - message='ut thi')) - self.assertTrue(h.matches(levelno=logging.ERROR, - message='nd so wi')) - self.assertFalse(h.matches(levelno=logging.INFO)) - -.. 
automodule:: logutils.testing - :members: diff --git a/splunk_eventgen/lib/logutils_src/doc/whatsnew.rst b/splunk_eventgen/lib/logutils_src/doc/whatsnew.rst deleted file mode 100644 index 3be96d10..00000000 --- a/splunk_eventgen/lib/logutils_src/doc/whatsnew.rst +++ /dev/null @@ -1,2 +0,0 @@ -.. include:: ../NEWS.txt - diff --git a/splunk_eventgen/lib/logutils_src/logutils/__init__.py b/splunk_eventgen/lib/logutils_src/logutils/__init__.py deleted file mode 100644 index 2811a987..00000000 --- a/splunk_eventgen/lib/logutils_src/logutils/__init__.py +++ /dev/null @@ -1,196 +0,0 @@ -# -# Copyright (C) 2010-2017 Vinay Sajip. See LICENSE.txt for details. -# -""" -The logutils package provides a set of handlers for the Python standard -library's logging package. - -Some of these handlers are out-of-scope for the standard library, and -so they are packaged here. Others are updated versions which have -appeared in recent Python releases, but are usable with older versions -of Python, and so are packaged here. -""" -import logging -from string import Template - -__version__ = '0.3.4.1' - - -class NullHandler(logging.Handler): - """ - This handler does nothing. It's intended to be used to avoid the - "No handlers could be found for logger XXX" one-off warning. This is - important for library code, which may contain code to log events. If a user - of the library does not configure logging, the one-off warning might be - produced; to avoid this, the library developer simply needs to instantiate - a NullHandler and add it to the top-level logger of the library module or - package. - """ - - def handle(self, record): - """ - Handle a record. Does nothing in this class, but in other - handlers it typically filters and then emits the record in a - thread-safe way. - """ - pass - - def emit(self, record): - """ - Emit a record. This does nothing and shouldn't be called during normal - processing, unless you redefine :meth:`~logutils.NullHandler.handle`. 
- """ - pass - - def createLock(self): - """ - Since this handler does nothing, it has no underlying I/O to protect - against multi-threaded access, so this method returns `None`. - """ - self.lock = None - - -class PercentStyle(object): - - default_format = '%(message)s' - asctime_format = '%(asctime)s' - - def __init__(self, fmt): - self._fmt = fmt or self.default_format - - def usesTime(self): - return self._fmt.find(self.asctime_format) >= 0 - - def format(self, record): - return self._fmt % record.__dict__ - - -class StrFormatStyle(PercentStyle): - default_format = '{message}' - asctime_format = '{asctime}' - - def format(self, record): - return self._fmt.format(**record.__dict__) - - -class StringTemplateStyle(PercentStyle): - default_format = '${message}' - asctime_format = '${asctime}' - - def __init__(self, fmt): - self._fmt = fmt or self.default_format - self._tpl = Template(self._fmt) - - def usesTime(self): - fmt = self._fmt - return fmt.find('$asctime') >= 0 or fmt.find(self.asctime_format) >= 0 - - def format(self, record): - return self._tpl.substitute(**record.__dict__) - - -_STYLES = {'%': PercentStyle, '{': StrFormatStyle, '$': StringTemplateStyle} - - -class Formatter(logging.Formatter): - """ - Subclasses Formatter in Pythons earlier than 3.2 in order to give - 3.2 Formatter behaviour with respect to allowing %-, {} or $- - formatting. - """ - - def __init__(self, fmt=None, datefmt=None, style='%'): - """ - Initialize the formatter with specified format strings. - - Initialize the formatter either with the specified format string, or a - default as described above. Allow for specialized date formatting with - the optional datefmt argument (if omitted, you get the ISO8601 format). - - Use a style parameter of '%', '{' or '$' to specify that you want to - use one of %-formatting, :meth:`str.format` (``{}``) formatting or - :class:`string.Template` formatting in your format string. 
- """ - if style not in _STYLES: - raise ValueError('Style must be one of: %s' % ','.join(_STYLES.keys())) - self._style = _STYLES[style](fmt) - self._fmt = self._style._fmt - self.datefmt = datefmt - - def usesTime(self): - """ - Check if the format uses the creation time of the record. - """ - return self._style.usesTime() - - def formatMessage(self, record): - return self._style.format(record) - - def format(self, record): - """ - Format the specified record as text. - - The record's attribute dictionary is used as the operand to a - string formatting operation which yields the returned string. - Before formatting the dictionary, a couple of preparatory steps - are carried out. The message attribute of the record is computed - using LogRecord.getMessage(). If the formatting string uses the - time (as determined by a call to usesTime(), formatTime() is - called to format the event time. If there is exception information, - it is formatted using formatException() and appended to the message. 
- """ - record.message = record.getMessage() - if self.usesTime(): - record.asctime = self.formatTime(record, self.datefmt) - s = self.formatMessage(record) - if record.exc_info: - # Cache the traceback text to avoid converting it multiple times - # (it's constant anyway) - if not record.exc_text: - record.exc_text = self.formatException(record.exc_info) - if record.exc_text: - if s[-1:] != "\n": - s = s + "\n" - s = s + record.exc_text - return s - - -class BraceMessage(object): - def __init__(self, fmt, *args, **kwargs): - self.fmt = fmt - self.args = args - self.kwargs = kwargs - self.str = None - - def __str__(self): - if self.str is None: - self.str = self.fmt.format(*self.args, **self.kwargs) - return self.str - - -class DollarMessage(object): - def __init__(self, fmt, **kwargs): - self.fmt = fmt - self.kwargs = kwargs - self.str = None - - def __str__(self): - if self.str is None: - self.str = Template(self.fmt).substitute(**self.kwargs) - return self.str - - -def hasHandlers(logger): - """ - See if a logger has any handlers. - """ - rv = False - while logger: - if logger.handlers: - rv = True - break - elif not logger.propagate: - break - else: - logger = logger.parent - return rv diff --git a/splunk_eventgen/lib/logutils_src/logutils/adapter.py b/splunk_eventgen/lib/logutils_src/logutils/adapter.py deleted file mode 100644 index 220c188d..00000000 --- a/splunk_eventgen/lib/logutils_src/logutils/adapter.py +++ /dev/null @@ -1,117 +0,0 @@ -# -# Copyright (C) 2010-2017 Vinay Sajip. See LICENSE.txt for details. -# -import logging - -import logutils - - -class LoggerAdapter(object): - """ - An adapter for loggers which makes it easier to specify contextual - information in logging output. - """ - - def __init__(self, logger, extra): - """ - Initialize the adapter with a logger and a dict-like object which - provides contextual information. This constructor signature allows - easy stacking of LoggerAdapters, if so desired. 
- - You can effectively pass keyword arguments as shown in the - following example: - - adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2")) - """ - self.logger = logger - self.extra = extra - - def process(self, msg, kwargs): - """ - Process the logging message and keyword arguments passed in to - a logging call to insert contextual information. You can either - manipulate the message itself, the keyword args or both. Return - the message and kwargs modified (or not) to suit your needs. - - Normally, you'll only need to override this one method in a - LoggerAdapter subclass for your specific needs. - """ - kwargs["extra"] = self.extra - return msg, kwargs - - # - # Boilerplate convenience methods - # - def debug(self, msg, *args, **kwargs): - """ - Delegate a debug call to the underlying logger. - """ - self.log(logging.DEBUG, msg, *args, **kwargs) - - def info(self, msg, *args, **kwargs): - """ - Delegate an info call to the underlying logger. - """ - self.log(logging.INFO, msg, *args, **kwargs) - - def warning(self, msg, *args, **kwargs): - """ - Delegate a warning call to the underlying logger. - """ - self.log(logging.WARNING, msg, *args, **kwargs) - - warn = warning - - def error(self, msg, *args, **kwargs): - """ - Delegate an error call to the underlying logger. - """ - self.log(logging.ERROR, msg, *args, **kwargs) - - def exception(self, msg, *args, **kwargs): - """ - Delegate an exception call to the underlying logger. - """ - kwargs["exc_info"] = 1 - self.log(logging.ERROR, msg, *args, **kwargs) - - def critical(self, msg, *args, **kwargs): - """ - Delegate a critical call to the underlying logger. - """ - self.log(logging.CRITICAL, msg, *args, **kwargs) - - def log(self, level, msg, *args, **kwargs): - """ - Delegate a log call to the underlying logger, after adding - contextual information from this adapter instance. 
- """ - if self.isEnabledFor(level): - msg, kwargs = self.process(msg, kwargs) - self.logger._log(level, msg, args, **kwargs) - - def isEnabledFor(self, level): - """ - Is this logger enabled for level 'level'? - """ - if self.logger.manager.disable >= level: - return False - return level >= self.getEffectiveLevel() - - def setLevel(self, level): - """ - Set the specified level on the underlying logger. - """ - self.logger.setLevel(level) - - def getEffectiveLevel(self): - """ - Get the effective level for the underlying logger. - """ - return self.logger.getEffectiveLevel() - - def hasHandlers(self): - """ - See if the underlying logger has any handlers. - """ - return logutils.hasHandlers(self.logger) diff --git a/splunk_eventgen/lib/logutils_src/logutils/colorize.py b/splunk_eventgen/lib/logutils_src/logutils/colorize.py deleted file mode 100644 index 8f375e5d..00000000 --- a/splunk_eventgen/lib/logutils_src/logutils/colorize.py +++ /dev/null @@ -1,191 +0,0 @@ -# -# Copyright (C) 2010-2017 Vinay Sajip. All rights reserved. -# -import ctypes -import logging -import os - -try: - unicode -except NameError: - unicode = None - - -class ColorizingStreamHandler(logging.StreamHandler): - """ - A stream handler which supports colorizing of console streams - under Windows, Linux and Mac OS X. - - :param strm: The stream to colorize - typically ``sys.stdout`` - or ``sys.stderr``. - """ - - # color names to indices - color_map = { - 'black': 0, - 'red': 1, - 'green': 2, - 'yellow': 3, - 'blue': 4, - 'magenta': 5, - 'cyan': 6, - 'white': 7, } - - # levels to (background, foreground, bold/intense) - if os.name == 'nt': - level_map = { - logging.DEBUG: (None, 'blue', True), - logging.INFO: (None, 'white', False), - logging.WARNING: (None, 'yellow', True), - logging.ERROR: (None, 'red', True), - logging.CRITICAL: ('red', 'white', True), } - else: - "Maps levels to colour/intensity settings." 
- level_map = { - logging.DEBUG: (None, 'blue', False), - logging.INFO: (None, 'black', False), - logging.WARNING: (None, 'yellow', False), - logging.ERROR: (None, 'red', False), - logging.CRITICAL: ('red', 'white', True), } - - csi = '\x1b[' - reset = '\x1b[0m' - - @property - def is_tty(self): - "Returns true if the handler's stream is a terminal." - isatty = getattr(self.stream, 'isatty', None) - return isatty and isatty() - - def emit(self, record): - try: - message = self.format(record) - stream = self.stream - if unicode and isinstance(message, unicode): - enc = getattr(stream, 'encoding', 'utf-8') - message = message.encode(enc, 'replace') - if not self.is_tty: - stream.write(message) - else: - self.output_colorized(message) - stream.write(getattr(self, 'terminator', '\n')) - self.flush() - except (KeyboardInterrupt, SystemExit): - raise - except: - self.handleError(record) - - if os.name != 'nt': - - def output_colorized(self, message): - """ - Output a colorized message. - - On Linux and Mac OS X, this method just writes the - already-colorized message to the stream, since on these - platforms console streams accept ANSI escape sequences - for colorization. On Windows, this handler implements a - subset of ANSI escape sequence handling by parsing the - message, extracting the sequences and making Win32 API - calls to colorize the output. - - :param message: The message to colorize and output. - """ - self.stream.write(message) - else: - import re - ansi_esc = re.compile(r'\x1b\[((?:\d+)(?:;(?:\d+))*)m') - - nt_color_map = { - 0: 0x00, # black - 1: 0x04, # red - 2: 0x02, # green - 3: 0x06, # yellow - 4: 0x01, # blue - 5: 0x05, # magenta - 6: 0x03, # cyan - 7: 0x07, # white - } - - def output_colorized(self, message): - """ - Output a colorized message. - - On Linux and Mac OS X, this method just writes the - already-colorized message to the stream, since on these - platforms console streams accept ANSI escape sequences - for colorization. 
On Windows, this handler implements a - subset of ANSI escape sequence handling by parsing the - message, extracting the sequences and making Win32 API - calls to colorize the output. - - :param message: The message to colorize and output. - """ - parts = self.ansi_esc.split(message) - write = self.stream.write - h = None - fd = getattr(self.stream, 'fileno', None) - if fd is not None: - fd = fd() - if fd in (1, 2): # stdout or stderr - h = ctypes.windll.kernel32.GetStdHandle(-10 - fd) - while parts: - text = parts.pop(0) - if text: - write(text) - if parts: - params = parts.pop(0) - if h is not None: - params = [int(p) for p in params.split(';')] - color = 0 - for p in params: - if 40 <= p <= 47: - color |= self.nt_color_map[p - 40] << 4 - elif 30 <= p <= 37: - color |= self.nt_color_map[p - 30] - elif p == 1: - color |= 0x08 # foreground intensity on - elif p == 0: # reset to default color - color = 0x07 - else: - pass # error condition ignored - ctypes.windll.kernel32.SetConsoleTextAttribute(h, color) - - def colorize(self, message, record): - """ - Colorize a message for a logging event. - - This implementation uses the ``level_map`` class attribute to - map the LogRecord's level to a colour/intensity setting, which is - then applied to the whole message. - - :param message: The message to colorize. - :param record: The ``LogRecord`` for the message. - """ - if record.levelno in self.level_map: - bg, fg, bold = self.level_map[record.levelno] - params = [] - if bg in self.color_map: - params.append(str(self.color_map[bg] + 40)) - if fg in self.color_map: - params.append(str(self.color_map[fg] + 30)) - if bold: - params.append('1') - if params: - message = ''.join((self.csi, ';'.join(params), 'm', message, self.reset)) - return message - - def format(self, record): - """ - Formats a record for output. - - This implementation colorizes the message line, but leaves - any traceback unolorized. 
- """ - message = logging.StreamHandler.format(self, record) - if self.is_tty: - # Don't colorize any traceback - parts = message.split('\n', 1) - parts[0] = self.colorize(parts[0], record) - message = '\n'.join(parts) - return message diff --git a/splunk_eventgen/lib/logutils_src/logutils/dictconfig.py b/splunk_eventgen/lib/logutils_src/logutils/dictconfig.py deleted file mode 100644 index 26b8886e..00000000 --- a/splunk_eventgen/lib/logutils_src/logutils/dictconfig.py +++ /dev/null @@ -1,551 +0,0 @@ -# -# Copyright (C) 2009-2017 Vinay Sajip. See LICENSE.txt for details. -# -import logging.handlers -import re -import sys - -try: - basestring -except NameError: - basestring = str -try: - StandardError -except NameError: - StandardError = Exception - -IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I) - - -def valid_ident(s): - m = IDENTIFIER.match(s) - if not m: - raise ValueError('Not a valid Python identifier: %r' % s) - return True - - -# -# This function is defined in logging only in recent versions of Python -# -try: - from logging import _checkLevel -except ImportError: - - def _checkLevel(level): - if isinstance(level, int): - rv = level - elif str(level) == level: - try: - levelnames = logging._levelNames - except AttributeError: - levelnames = logging._nameToLevel - if level not in levelnames: - raise ValueError('Unknown level: %r' % level) - rv = levelnames[level] - else: - raise TypeError('Level not an integer or a ' 'valid string: %r' % level) - return rv - - -# The ConvertingXXX classes are wrappers around standard Python containers, -# and they serve to convert any suitable values in the container. The -# conversion converts base dicts, lists and tuples to their wrapped -# equivalents, whereas strings which match a conversion format are converted -# appropriately. -# -# Each wrapper should have a configurator attribute holding the actual -# configurator to use for conversion. 
- - -class ConvertingDict(dict): - """A converting dictionary wrapper.""" - - def __getitem__(self, key): - value = dict.__getitem__(self, key) - result = self.configurator.convert(value) - # If the converted value is different, save for next time - if value is not result: - self[key] = result - if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): - result.parent = self - result.key = key - return result - - def get(self, key, default=None): - value = dict.get(self, key, default) - result = self.configurator.convert(value) - # If the converted value is different, save for next time - if value is not result: - self[key] = result - if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): - result.parent = self - result.key = key - return result - - def pop(self, key, default=None): - value = dict.pop(self, key, default) - result = self.configurator.convert(value) - if value is not result: - if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): - result.parent = self - result.key = key - return result - - -class ConvertingList(list): - """A converting list wrapper.""" - - def __getitem__(self, key): - value = list.__getitem__(self, key) - result = self.configurator.convert(value) - # If the converted value is different, save for next time - if value is not result: - self[key] = result - if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): - result.parent = self - result.key = key - return result - - def pop(self, idx=-1): - value = list.pop(self, idx) - result = self.configurator.convert(value) - if value is not result: - if type(result) in (ConvertingDict, ConvertingList, ConvertingTuple): - result.parent = self - return result - - -class ConvertingTuple(tuple): - """A converting tuple wrapper.""" - - def __getitem__(self, key): - value = tuple.__getitem__(self, key) - result = self.configurator.convert(value) - if value is not result: - if type(result) in (ConvertingDict, ConvertingList, 
ConvertingTuple): - result.parent = self - result.key = key - return result - - -class BaseConfigurator(object): - """ - The configurator base class which defines some useful defaults. - """ - - CONVERT_PATTERN = re.compile(r'^(?P[a-z]+)://(?P.*)$') - - WORD_PATTERN = re.compile(r'^\s*(\w+)\s*') - DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*') - INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*') - DIGIT_PATTERN = re.compile(r'^\d+$') - - value_converters = { - 'ext': 'ext_convert', - 'cfg': 'cfg_convert', } - - # We might want to use a different one, e.g. importlib - importer = __import__ - "Allows the importer to be redefined." - - def __init__(self, config): - """ - Initialise an instance with the specified configuration - dictionary. - """ - self.config = ConvertingDict(config) - self.config.configurator = self - - def resolve(self, s): - """ - Resolve strings to objects using standard import and attribute - syntax. - """ - name = s.split('.') - used = name.pop(0) - try: - found = self.importer(used) - for frag in name: - used += '.' 
+ frag - try: - found = getattr(found, frag) - except AttributeError: - self.importer(used) - found = getattr(found, frag) - return found - except ImportError: - e, tb = sys.exc_info()[1:] - v = ValueError('Cannot resolve %r: %s' % (s, e)) - v.__cause__, v.__traceback__ = e, tb - raise v - - def ext_convert(self, value): - """Default converter for the ext:// protocol.""" - return self.resolve(value) - - def cfg_convert(self, value): - """Default converter for the cfg:// protocol.""" - rest = value - m = self.WORD_PATTERN.match(rest) - if m is None: - raise ValueError("Unable to convert %r" % value) - else: - rest = rest[m.end():] - d = self.config[m.groups()[0]] - while rest: - m = self.DOT_PATTERN.match(rest) - if m: - d = d[m.groups()[0]] - else: - m = self.INDEX_PATTERN.match(rest) - if m: - idx = m.groups()[0] - if not self.DIGIT_PATTERN.match(idx): - d = d[idx] - else: - try: - n = int(idx) # try as number first (most likely) - d = d[n] - except TypeError: - d = d[idx] - if m: - rest = rest[m.end():] - else: - raise ValueError('Unable to convert ' '%r at %r' % (value, rest)) - # rest should be empty - return d - - def convert(self, value): - """ - Convert values to an appropriate type. dicts, lists and tuples are - replaced by their converting alternatives. Strings are checked to - see if they have a conversion format and are converted if they do. 
- """ - if not isinstance(value, ConvertingDict) and isinstance(value, dict): - value = ConvertingDict(value) - value.configurator = self - elif not isinstance(value, ConvertingList) and isinstance(value, list): - value = ConvertingList(value) - value.configurator = self - elif not isinstance(value, ConvertingTuple) and isinstance(value, tuple): - value = ConvertingTuple(value) - value.configurator = self - elif isinstance(value, basestring): - m = self.CONVERT_PATTERN.match(value) - if m: - d = m.groupdict() - prefix = d['prefix'] - converter = self.value_converters.get(prefix, None) - if converter: - suffix = d['suffix'] - converter = getattr(self, converter) - value = converter(suffix) - return value - - def configure_custom(self, config): - """Configure an object with a user-supplied factory.""" - c = config.pop('()') - if isinstance(c, basestring): - c = self.resolve(c) - props = config.pop('.', None) - # Check for valid identifiers - kwargs = dict([(k, config[k]) for k in config if valid_ident(k)]) - result = c(**kwargs) - if props: - for name, value in props.items(): - setattr(result, name, value) - return result - - def as_tuple(self, value): - """Utility function which converts lists to tuples.""" - if isinstance(value, list): - value = tuple(value) - return value - - -def named_handlers_supported(): - major, minor = sys.version_info[:2] - if major == 2: - result = minor >= 7 - elif major == 3: - result = minor >= 2 - else: - result = (major > 3) - return result - - -class DictConfigurator(BaseConfigurator): - """ - Configure logging using a dictionary-like object to describe the - configuration. 
- """ - - def configure(self): - """Do the configuration.""" - - config = self.config - if 'version' not in config: - raise ValueError("dictionary doesn't specify a version") - if config['version'] != 1: - raise ValueError("Unsupported version: %s" % config['version']) - incremental = config.pop('incremental', False) - EMPTY_DICT = {} - logging._acquireLock() - try: - if incremental: - handlers = config.get('handlers', EMPTY_DICT) - # incremental handler config only if handler name - # ties in to logging._handlers (Python 2.7, 3.2+) - if named_handlers_supported(): - for name in handlers: - if name not in logging._handlers: - raise ValueError('No handler found with ' 'name %r' % name) - else: - try: - handler = logging._handlers[name] - handler_config = handlers[name] - level = handler_config.get('level', None) - if level: - handler.setLevel(_checkLevel(level)) - except StandardError: - e = sys.exc_info()[1] - raise ValueError('Unable to configure handler ' '%r: %s' % (name, e)) - loggers = config.get('loggers', EMPTY_DICT) - for name in loggers: - try: - self.configure_logger(name, loggers[name], True) - except StandardError: - e = sys.exc_info()[1] - raise ValueError('Unable to configure logger ' '%r: %s' % (name, e)) - root = config.get('root', None) - if root: - try: - self.configure_root(root, True) - except StandardError: - e = sys.exc_info()[1] - raise ValueError('Unable to configure root ' 'logger: %s' % e) - else: - disable_existing = config.pop('disable_existing_loggers', True) - - logging._handlers.clear() - del logging._handlerList[:] - - # Do formatters first - they don't refer to anything else - formatters = config.get('formatters', EMPTY_DICT) - for name in formatters: - try: - formatters[name] = self.configure_formatter(formatters[name]) - except StandardError: - e = sys.exc_info()[1] - raise ValueError('Unable to configure ' 'formatter %r: %s' % (name, e)) - # Next, do filters - they don't refer to anything else, either - filters = 
config.get('filters', EMPTY_DICT) - for name in filters: - try: - filters[name] = self.configure_filter(filters[name]) - except StandardError: - e = sys.exc_info()[1] - raise ValueError('Unable to configure ' 'filter %r: %s' % (name, e)) - - # Next, do handlers - they refer to formatters and filters - # As handlers can refer to other handlers, sort the keys - # to allow a deterministic order of configuration - handlers = config.get('handlers', EMPTY_DICT) - for name in sorted(handlers): - try: - handler = self.configure_handler(handlers[name]) - handler.name = name - handlers[name] = handler - except StandardError: - e = sys.exc_info()[1] - raise ValueError('Unable to configure handler ' '%r: %s' % (name, e)) - # Next, do loggers - they refer to handlers and filters - - # We don't want to lose the existing loggers, since other threads may have pointers to them. - # Existing is set to contain all existing loggers, and as we go through the new configuration we - # remove any which are configured. At the end, what's left in existing is the set of loggers - # which were in the previous configuration but which are not in the new configuration. - root = logging.root - existing = sorted(root.manager.loggerDict.keys()) - # The list needs to be sorted so that we can avoid disabling child loggers of explicitly named loggers. - # With a sorted list it is easier to find the child loggers. We'll keep the list of existing loggers - # which are children of named loggers here... - child_loggers = [] - # now set up the new ones... - loggers = config.get('loggers', EMPTY_DICT) - for name in loggers: - if name in existing: - i = existing.index(name) - prefixed = name + "." 
- pflen = len(prefixed) - num_existing = len(existing) - i = i + 1 # look at the entry after name - while (i < num_existing) and\ - (existing[i][:pflen] == prefixed): - child_loggers.append(existing[i]) - i = i + 1 - existing.remove(name) - try: - self.configure_logger(name, loggers[name]) - except StandardError: - e = sys.exc_info()[1] - raise ValueError('Unable to configure logger ' '%r: %s' % (name, e)) - - # Disable any old loggers. There's no point deleting them as other threads may continue to hold - # references and by disabling them, you stop them doing any logging. However, don't disable children of - # named loggers, as that's probably not what was intended by the user. - for log in existing: - logger = root.manager.loggerDict[log] - if log in child_loggers: - logger.level = logging.NOTSET - logger.handlers = [] - logger.propagate = True - elif disable_existing: - logger.disabled = True - - # And finally, do the root logger - root = config.get('root', None) - if root: - try: - self.configure_root(root) - except StandardError: - e = sys.exc_info()[1] - raise ValueError('Unable to configure root ' 'logger: %s' % e) - finally: - logging._releaseLock() - - def configure_formatter(self, config): - """Configure a formatter from a dictionary.""" - if '()' in config: - factory = config['()'] # for use in exception handler - try: - result = self.configure_custom(config) - except TypeError: - te = sys.exc_info()[1] - if "'format'" not in str(te): - raise - # Name of parameter changed from fmt to format. Retry with old name. This is so that code can be used - # with older Python versions (e.g. 
by Django) - config['fmt'] = config.pop('format') - config['()'] = factory - result = self.configure_custom(config) - else: - fmt = config.get('format', None) - dfmt = config.get('datefmt', None) - result = logging.Formatter(fmt, dfmt) - return result - - def configure_filter(self, config): - """Configure a filter from a dictionary.""" - if '()' in config: - result = self.configure_custom(config) - else: - name = config.get('name', '') - result = logging.Filter(name) - return result - - def add_filters(self, filterer, filters): - """Add filters to a filterer from a list of names.""" - for f in filters: - try: - filterer.addFilter(self.config['filters'][f]) - except StandardError: - e = sys.exc_info()[1] - raise ValueError('Unable to add filter %r: %s' % (f, e)) - - def configure_handler(self, config): - """Configure a handler from a dictionary.""" - formatter = config.pop('formatter', None) - if formatter: - try: - formatter = self.config['formatters'][formatter] - except StandardError: - e = sys.exc_info()[1] - raise ValueError('Unable to set formatter ' '%r: %s' % (formatter, e)) - level = config.pop('level', None) - filters = config.pop('filters', None) - if '()' in config: - c = config.pop('()') - if isinstance(c, basestring): - c = self.resolve(c) - factory = c - else: - klass = self.resolve(config.pop('class')) - # Special case for handler which refers to another handler - if issubclass(klass, logging.handlers.MemoryHandler) and 'target' in config: - try: - config['target'] = self.config['handlers'][config['target']] - except StandardError: - e = sys.exc_info()[1] - raise ValueError('Unable to set target handler ' '%r: %s' % (config['target'], e)) - elif issubclass(klass, logging.handlers.SMTPHandler) and 'mailhost' in config: - config['mailhost'] = self.as_tuple(config['mailhost']) - elif issubclass(klass, logging.handlers.SysLogHandler) and 'address' in config: - config['address'] = self.as_tuple(config['address']) - factory = klass - kwargs = dict([(k, 
config[k]) for k in config if valid_ident(k)]) - try: - result = factory(**kwargs) - except TypeError: - te = sys.exc_info()[1] - if "'stream'" not in str(te): - raise - # The argument name changed from strm to stream, so we retry with the old name. This is so that code can be - # used with older Python versions (e.g. by Django) - kwargs['strm'] = kwargs.pop('stream') - result = factory(**kwargs) - if formatter: - result.setFormatter(formatter) - if level is not None: - result.setLevel(_checkLevel(level)) - if filters: - self.add_filters(result, filters) - return result - - def add_handlers(self, logger, handlers): - """Add handlers to a logger from a list of names.""" - for h in handlers: - try: - logger.addHandler(self.config['handlers'][h]) - except StandardError: - e = sys.exc_info()[1] - raise ValueError('Unable to add handler %r: %s' % (h, e)) - - def common_logger_config(self, logger, config, incremental=False): - """ - Perform configuration which is common to root and non-root loggers. 
- """ - level = config.get('level', None) - if level is not None: - logger.setLevel(_checkLevel(level)) - if not incremental: - # Remove any existing handlers - for h in logger.handlers[:]: - logger.removeHandler(h) - handlers = config.get('handlers', None) - if handlers: - self.add_handlers(logger, handlers) - filters = config.get('filters', None) - if filters: - self.add_filters(logger, filters) - - def configure_logger(self, name, config, incremental=False): - """Configure a non-root logger from a dictionary.""" - logger = logging.getLogger(name) - self.common_logger_config(logger, config, incremental) - propagate = config.get('propagate', None) - if propagate is not None: - logger.propagate = propagate - - def configure_root(self, config, incremental=False): - """Configure a root logger from a dictionary.""" - root = logging.getLogger() - self.common_logger_config(root, config, incremental) - - -dictConfigClass = DictConfigurator - - -def dictConfig(config): - """Configure logging using a dictionary.""" - dictConfigClass(config).configure() diff --git a/splunk_eventgen/lib/logutils_src/logutils/http.py b/splunk_eventgen/lib/logutils_src/logutils/http.py deleted file mode 100644 index 2d59dc88..00000000 --- a/splunk_eventgen/lib/logutils_src/logutils/http.py +++ /dev/null @@ -1,91 +0,0 @@ -# -# Copyright (C) 2010-2017 Vinay Sajip. See LICENSE.txt for details. -# -import logging - - -class HTTPHandler(logging.Handler): - """ - A class which sends records to a Web server, using either GET or - POST semantics. - - :param host: The Web server to connect to. - :param url: The URL to use for the connection. - :param method: The HTTP method to use. GET and POST are supported. - :param secure: set to True if HTTPS is to be used. - :param credentials: Set to a username/password tuple if desired. If - set, a Basic authentication header is sent. 
WARNING: - if using credentials, make sure `secure` is `True` - to avoid sending usernames and passwords in - cleartext over the wire. - """ - - def __init__(self, host, url, method="GET", secure=False, credentials=None): - """ - Initialize an instance. - """ - logging.Handler.__init__(self) - method = method.upper() - if method not in ["GET", "POST"]: - raise ValueError("method must be GET or POST") - self.host = host - self.url = url - self.method = method - self.secure = secure - self.credentials = credentials - - def mapLogRecord(self, record): - """ - Default implementation of mapping the log record into a dict - that is sent as the CGI data. Overwrite in your class. - Contributed by Franz Glasner. - - :param record: The record to be mapped. - """ - return record.__dict__ - - def emit(self, record): - """ - Emit a record. - - Send the record to the Web server as a percent-encoded dictionary - - :param record: The record to be emitted. - """ - try: - import http.client - import urllib.parse - host = self.host - if self.secure: - h = http.client.HTTPSConnection(host) - else: - h = http.client.HTTPConnection(host) - url = self.url - data = urllib.parse.urlencode(self.mapLogRecord(record)) - if self.method == "GET": - if (url.find('?') >= 0): - sep = '&' - else: - sep = '?' - url = url + "%c%s" % (sep, data) - h.putrequest(self.method, url) - # support multiple hosts on one IP address... 
- # need to strip optional :port from host, if present - i = host.find(":") - if i >= 0: - host = host[:i] - h.putheader("Host", host) - if self.method == "POST": - h.putheader("Content-type", "application/x-www-form-urlencoded") - h.putheader("Content-length", str(len(data))) - if self.credentials: - import base64 - s = ('u%s:%s' % self.credentials).encode('utf-8') - s = 'Basic ' + base64.b64encode(s).strip() - h.putheader('Authorization', s) - h.endheaders(data if self.method == "POST" else None) - h.getresponse() # can't do anything with the result - except (KeyboardInterrupt, SystemExit): - raise - except: - self.handleError(record) diff --git a/splunk_eventgen/lib/logutils_src/logutils/queue.py b/splunk_eventgen/lib/logutils_src/logutils/queue.py deleted file mode 100644 index 0a7d22a2..00000000 --- a/splunk_eventgen/lib/logutils_src/logutils/queue.py +++ /dev/null @@ -1,227 +0,0 @@ -# -# Copyright (C) 2010-2017 Vinay Sajip. See LICENSE.txt for details. -# -""" -This module contains classes which help you work with queues. A typical -application is when you want to log from performance-critical threads, but -where the handlers you want to use are slow (for example, -:class:`~logging.handlers.SMTPHandler`). In that case, you can create a queue, -pass it to a :class:`QueueHandler` instance and use that instance with your -loggers. Elsewhere, you can instantiate a :class:`QueueListener` with the same -queue and some slow handlers, and call :meth:`~QueueListener.start` on it. -This will start monitoring the queue on a separate thread and call all the -configured handlers *on that thread*, so that your logging thread is not held -up by the slow handlers. - -Note that as well as in-process queues, you can use these classes with queues -from the :mod:`multiprocessing` module. - -**N.B.** This is part of the standard library since Python 3.2, so the -version here is for use with earlier Python versions. 
-""" -import logging -import threading - -try: - import Queue as queue -except ImportError: - import queue - - -class QueueHandler(logging.Handler): - """ - This handler sends events to a queue. Typically, it would be used together - with a multiprocessing Queue to centralise logging to file in one process - (in a multi-process application), so as to avoid file write contention - between processes. - - :param queue: The queue to send `LogRecords` to. - """ - - def __init__(self, queue): - """ - Initialise an instance, using the passed queue. - """ - logging.Handler.__init__(self) - self.queue = queue - - def enqueue(self, record): - """ - Enqueue a record. - - The base implementation uses :meth:`~queue.Queue.put_nowait`. You may - want to override this method if you want to use blocking, timeouts or - custom queue implementations. - - :param record: The record to enqueue. - """ - self.queue.put_nowait(record) - - def prepare(self, record): - """ - Prepares a record for queuing. The object returned by this method is - enqueued. - - The base implementation formats the record to merge the message - and arguments, and removes unpickleable items from the record - in-place. - - You might want to override this method if you want to convert - the record to a dict or JSON string, or send a modified copy - of the record while leaving the original intact. - - :param record: The record to prepare. - """ - # The format operation gets traceback text into record.exc_text - # (if there's exception data), and also puts the message into - # record.message. We can then use this to replace the original - # msg + args, as these might be unpickleable. We also zap the - # exc_info attribute, as it's no longer needed and, if not None, - # will typically not be pickleable. - self.format(record) - record.msg = record.message - record.args = None - record.exc_info = None - return record - - def emit(self, record): - """ - Emit a record. 
- - Writes the LogRecord to the queue, preparing it for pickling first. - - :param record: The record to emit. - """ - try: - self.enqueue(self.prepare(record)) - except (KeyboardInterrupt, SystemExit): - raise - except: - self.handleError(record) - - -class QueueListener(object): - """ - This class implements an internal threaded listener which watches for - LogRecords being added to a queue, removes them and passes them to a - list of handlers for processing. - - :param record: The queue to listen to. - :param handlers: The handlers to invoke on everything received from - the queue. - """ - _sentinel = None - - def __init__(self, queue, *handlers): - """ - Initialise an instance with the specified queue and - handlers. - """ - self.queue = queue - self.handlers = handlers - self._stop = threading.Event() - self._thread = None - - def dequeue(self, block): - """ - Dequeue a record and return it, optionally blocking. - - The base implementation uses :meth:`~queue.Queue.get`. You may want to - override this method if you want to use timeouts or work with custom - queue implementations. - - :param block: Whether to block if the queue is empty. If `False` and - the queue is empty, an :class:`~queue.Empty` exception - will be thrown. - """ - return self.queue.get(block) - - def start(self): - """ - Start the listener. - - This starts up a background thread to monitor the queue for - LogRecords to process. - """ - self._thread = t = threading.Thread(target=self._monitor) - t.setDaemon(True) - t.start() - - def prepare(self, record): - """ - Prepare a record for handling. - - This method just returns the passed-in record. You may want to - override this method if you need to do any custom marshalling or - manipulation of the record before passing it to the handlers. - - :param record: The record to prepare. - """ - return record - - def handle(self, record): - """ - Handle a record. - - This just loops through the handlers offering them the record - to handle. 
- - :param record: The record to handle. - """ - record = self.prepare(record) - for handler in self.handlers: - handler.handle(record) - - def _monitor(self): - """ - Monitor the queue for records, and ask the handler - to deal with them. - - This method runs on a separate, internal thread. - The thread will terminate if it sees a sentinel object in the queue. - """ - q = self.queue - has_task_done = hasattr(q, 'task_done') - while not self._stop.isSet(): - try: - record = self.dequeue(True) - if record is self._sentinel: - break - self.handle(record) - if has_task_done: - q.task_done() - except queue.Empty: - pass - # There might still be records in the queue. - while True: - try: - record = self.dequeue(False) - if record is self._sentinel: - break - self.handle(record) - if has_task_done: - q.task_done() - except queue.Empty: - break - - def enqueue_sentinel(self): - """ - Writes a sentinel to the queue to tell the listener to quit. This - implementation uses ``put_nowait()``. You may want to override this - method if you want to use timeouts or work with custom queue - implementations. - """ - self.queue.put_nowait(self._sentinel) - - def stop(self): - """ - Stop the listener. - - This asks the thread to terminate, and then waits for it to do so. - Note that if you don't call this before your application exits, there - may be some records still left on the queue, which won't be processed. - """ - self._stop.set() - self.enqueue_sentinel() - self._thread.join() - self._thread = None diff --git a/splunk_eventgen/lib/logutils_src/logutils/redis.py b/splunk_eventgen/lib/logutils_src/logutils/redis.py deleted file mode 100644 index 46641bf2..00000000 --- a/splunk_eventgen/lib/logutils_src/logutils/redis.py +++ /dev/null @@ -1,79 +0,0 @@ -# -# Copyright (C) 2011-2017 Vinay Sajip. See LICENSE.txt for details. -# -""" -This module contains classes which help you work with Redis queues. 
-""" - -from logutils.queue import QueueHandler, QueueListener - -try: - import cPickle as pickle -except ImportError: - import pickle - - -class RedisQueueHandler(QueueHandler): - """ - A QueueHandler implementation which pushes pickled - records to a Redis queue using a specified key. - - :param key: The key to use for the queue. Defaults to - "python.logging". - :param redis: If specified, this instance is used to - communicate with a Redis instance. - :param limit: If specified, the queue is restricted to - have only this many elements. - """ - - def __init__(self, key='python.logging', redis=None, limit=0): - if redis is None: - from redis import Redis - redis = Redis() - self.key = key - assert limit >= 0 - self.limit = limit - QueueHandler.__init__(self, redis) - - def enqueue(self, record): - s = pickle.dumps(vars(record)) - self.queue.rpush(self.key, s) - if self.limit: - self.queue.ltrim(self.key, -self.limit, -1) - - -class RedisQueueListener(QueueListener): - """ - A QueueListener implementation which fetches pickled - records from a Redis queue using a specified key. - - :param key: The key to use for the queue. Defaults to - "python.logging". - :param redis: If specified, this instance is used to - communicate with a Redis instance. - """ - - def __init__(self, *handlers, **kwargs): - redis = kwargs.get('redis') - if redis is None: - from redis import Redis - redis = Redis() - self.key = kwargs.get('key', 'python.logging') - QueueListener.__init__(self, redis, *handlers) - - def dequeue(self, block): - """ - Dequeue and return a record. 
- """ - if block: - s = self.queue.blpop(self.key)[1] - else: - s = self.queue.lpop(self.key) - if not s: - record = None - else: - record = pickle.loads(s) - return record - - def enqueue_sentinel(self): - self.queue.rpush(self.key, '') diff --git a/splunk_eventgen/lib/logutils_src/logutils/testing.py b/splunk_eventgen/lib/logutils_src/logutils/testing.py deleted file mode 100644 index bb8ac3df..00000000 --- a/splunk_eventgen/lib/logutils_src/logutils/testing.py +++ /dev/null @@ -1,157 +0,0 @@ -# -# Copyright (C) 2010-2017 Vinay Sajip. See LICENSE.txt for details. -# -from logging.handlers import BufferingHandler - - -class TestHandler(BufferingHandler): - """ - This handler collects records in a buffer for later inspection by - your unit test code. - - :param matcher: The :class:`~logutils.testing.Matcher` instance to - use for matching. - """ - - def __init__(self, matcher): - # BufferingHandler takes a "capacity" argument - # so as to know when to flush. As we're overriding - # shouldFlush anyway, we can set a capacity of zero. - # You can call flush() manually to clear out the - # buffer. - BufferingHandler.__init__(self, 0) - self.formatted = [] - self.matcher = matcher - - def shouldFlush(self): - """ - Should the buffer be flushed? - - This returns `False` - you'll need to flush manually, usually after - your unit test code checks the buffer contents against your - expectations. - """ - return False - - def emit(self, record): - """ - Saves the `__dict__` of the record in the `buffer` attribute, - and the formatted records in the `formatted` attribute. - - :param record: The record to emit. - """ - self.formatted.append(self.format(record)) - self.buffer.append(record.__dict__) - - def flush(self): - """ - Clears out the `buffer` and `formatted` attributes. - """ - BufferingHandler.flush(self) - self.formatted = [] - - def matches(self, **kwargs): - """ - Look for a saved dict whose keys/values match the supplied arguments. 
- - Return `True` if found, else `False`. - - :param kwargs: A set of keyword arguments whose names are LogRecord - attributes and whose values are what you want to - match in a stored LogRecord. - """ - result = False - for d in self.buffer: - if self.matcher.matches(d, **kwargs): - result = True - break - # if not result: - # print('*** matcher failed completely on %d records' % len(self.buffer)) - return result - - def matchall(self, kwarglist): - """ - Accept a list of keyword argument values and ensure that the handler's - buffer of stored records matches the list one-for-one. - - Return `True` if exactly matched, else `False`. - - :param kwarglist: A list of keyword-argument dictionaries, each of - which will be passed to :meth:`matches` with the - corresponding record from the buffer. - """ - if self.count != len(kwarglist): - result = False - else: - result = True - for d, kwargs in zip(self.buffer, kwarglist): - if not self.matcher.matches(d, **kwargs): - result = False - break - return result - - @property - def count(self): - """ - The number of records in the buffer. - """ - return len(self.buffer) - - -class Matcher(object): - """ - This utility class matches a stored dictionary of - :class:`logging.LogRecord` attributes with keyword arguments - passed to its :meth:`~logutils.testing.Matcher.matches` method. - """ - - _partial_matches = ('msg', 'message') - """ - A list of :class:`logging.LogRecord` attribute names which - will be checked for partial matches. If not in this list, - an exact match will be attempted. - """ - - def matches(self, d, **kwargs): - """ - Try to match a single dict with the supplied arguments. - - Keys whose values are strings and which are in self._partial_matches - will be checked for partial (i.e. substring) matches. You can extend - this scheme to (for example) do regular expression matching, etc. - - Return `True` if found, else `False`. 
- - :param kwargs: A set of keyword arguments whose names are LogRecord - attributes and whose values are what you want to - match in a stored LogRecord. - """ - result = True - for k in kwargs: - v = kwargs[k] - dv = d.get(k) - if not self.match_value(k, dv, v): - # print('*** matcher failed: %s, %r, %r' % (k, dv, v)) - result = False - break - return result - - def match_value(self, k, dv, v): - """ - Try to match a single stored value (dv) with a supplied value (v). - - Return `True` if found, else `False`. - - :param k: The key value (LogRecord attribute name). - :param dv: The stored value to match against. - :param v: The value to compare with the stored value. - """ - if type(v) != type(dv): - result = False - elif type(dv) is not str or k not in self._partial_matches: - result = (v == dv) - else: - result = dv.find(v) >= 0 - # if not result: - # print('*** matcher failed on %s: %r vs. %r' % (k, dv, v)) - return result diff --git a/splunk_eventgen/lib/logutils_src/logutils_src_setup.py b/splunk_eventgen/lib/logutils_src/logutils_src_setup.py deleted file mode 100644 index 8eb90944..00000000 --- a/splunk_eventgen/lib/logutils_src/logutils_src_setup.py +++ /dev/null @@ -1,65 +0,0 @@ -# -*- coding: utf-8 -*- - -import distutils.core -import re -from os.path import dirname, join - -import logutils - - -def description(): - f = open(join(dirname(__file__), 'README.rst')) - readme = f.read() - f.close() - regexp = r'logutils\s*[\d.]*\s*\n=======+\s*\n(.*)Requirements ' - reqts, = re.findall(regexp, readme, re.DOTALL) - regexp = r'Availability & Documentation\s*\n-----+\s*\n(.*)' - avail, = re.findall(regexp, readme, re.DOTALL) - return reqts + avail - - -class TestCommand(distutils.core.Command): - user_options = [] - - def run(self): - import sys - import unittest - - sys.path.append(join(dirname(__file__), 'tests')) - import logutil_tests - loader = unittest.TestLoader() - runner = unittest.TextTestRunner() - test_results = 
runner.run(loader.loadTestsFromModule(logutil_tests)) - if not test_results.wasSuccessful(): - sys.exit(1) - - def initialize_options(self): - pass - - def finalize_options(self): - pass - - -distutils.core.setup( - name='logutils', - version=logutils.__version__, - author='Vinay Sajip', - author_email='vinay_sajip@red-dove.com', - url='http://code.google.com/p/logutils/', - description='Logging utilities', - long_description=description(), - license='Copyright (C) 2010-2017 by Vinay Sajip. All Rights Reserved. See LICENSE.txt for license.', - classifiers=[ - 'Development Status :: 5 - Production/Stable', - 'Environment :: Console', - 'Intended Audience :: Developers', - 'License :: OSI Approved :: BSD License', - 'Operating System :: OS Independent', - 'Programming Language :: Python', - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 3", - 'Topic :: Software Development', ], - packages=['logutils'], - cmdclass={ - 'test': TestCommand, }, -) diff --git a/splunk_eventgen/lib/logutils_src/tests/mytest.py b/splunk_eventgen/lib/logutils_src/tests/mytest.py deleted file mode 100644 index ac9cbcc2..00000000 --- a/splunk_eventgen/lib/logutils_src/tests/mytest.py +++ /dev/null @@ -1,8 +0,0 @@ -from __future__ import absolute_import - -from logutils.testing import Matcher, TestHandler - - -class MyTestHandler(TestHandler): - def __init__(self): - TestHandler.__init__(self, Matcher()) diff --git a/splunk_eventgen/lib/logutils_src/tests/test_adapter.py b/splunk_eventgen/lib/logutils_src/tests/test_adapter.py deleted file mode 100644 index a827f95d..00000000 --- a/splunk_eventgen/lib/logutils_src/tests/test_adapter.py +++ /dev/null @@ -1,70 +0,0 @@ -# -# Copyright (C) 2008-2017 Vinay Sajip. See LICENSE.txt for details. 
-# -import logging -import unittest - -from logutils.adapter import LoggerAdapter -from logutils.testing import Matcher, TestHandler - - -class AdapterTest(unittest.TestCase): - def setUp(self): - self.handler = h = TestHandler(Matcher()) - self.logger = temp_logger = logging.getLogger() - temp_logger.addHandler(h) - self.adapter = LoggerAdapter(temp_logger, {}) - - def tearDown(self): - self.logger.removeHandler(self.handler) - self.handler.close() - - def test_simple(self): - """Simple test of logging test harness.""" - # Just as a demo, let's log some messages. - # Only one should show up in the log. - self.adapter.debug("This won't show up.") - self.adapter.info("Neither will this.") - self.adapter.warning("But this will.") - h = self.handler - self.assertTrue(h.matches(levelno=logging.WARNING)) - self.assertFalse(h.matches(levelno=logging.DEBUG)) - self.assertFalse(h.matches(levelno=logging.INFO)) - - def test_partial(self): - """Test of partial matching in logging test harness.""" - # Just as a demo, let's log some messages. - # Only one should show up in the log. - self.adapter.debug("This won't show up.") - self.adapter.info("Neither will this.") - self.adapter.warning("But this will.") - h = self.handler - self.assertTrue(h.matches(msg="ut th")) # from "But this will" - self.assertTrue(h.matches(message="ut th")) # from "But this will" - self.assertFalse(h.matches(message="either")) - self.assertFalse(h.matches(message="won't")) - - def test_multiple(self): - """Test of matching multiple values in logging test harness.""" - # Just as a demo, let's log some messages. - # Only one should show up in the log. 
- self.adapter.debug("This won't show up.") - self.adapter.info("Neither will this.") - self.adapter.warning("But this will.") - self.adapter.error("And so will this.") - h = self.handler - self.assertTrue(h.matches(levelno=logging.WARNING, message='ut th')) - self.assertTrue(h.matches(levelno=logging.ERROR, message='nd so w')) - self.assertFalse(h.matches(levelno=logging.INFO)) - - def test_hashandlers(self): - """Test of hasHandlers() functionality.""" - self.assertTrue(self.adapter.hasHandlers()) - self.logger.removeHandler(self.handler) - self.assertFalse(self.adapter.hasHandlers()) - self.logger.addHandler(self.handler) - self.assertTrue(self.adapter.hasHandlers()) - - -if __name__ == '__main__': - unittest.main() diff --git a/splunk_eventgen/lib/logutils_src/tests/test_colorize.py b/splunk_eventgen/lib/logutils_src/tests/test_colorize.py deleted file mode 100644 index 18d1b263..00000000 --- a/splunk_eventgen/lib/logutils_src/tests/test_colorize.py +++ /dev/null @@ -1,24 +0,0 @@ -# -# Copyright (C) 2012-2017 Vinay Sajip. See LICENSE.txt for details. -# -import logging -import sys -import unittest - -import logutils.colorize - -if sys.version_info[0] < 3: - u = lambda o: unicode(o, 'unicode_escape') -else: - u = lambda o: o - - -class ColorizeTest(unittest.TestCase): - def test_colorize(self): - logger = logging.getLogger() - handler = logutils.colorize.ColorizingStreamHandler() - logger.addHandler(handler) - try: - logger.warning(u('Some unicode string with some \u015b\u0107\u017a\xf3\u0142 chars')) - finally: - logger.removeHandler(handler) diff --git a/splunk_eventgen/lib/logutils_src/tests/test_dictconfig.py b/splunk_eventgen/lib/logutils_src/tests/test_dictconfig.py deleted file mode 100644 index 950bcc6c..00000000 --- a/splunk_eventgen/lib/logutils_src/tests/test_dictconfig.py +++ /dev/null @@ -1,550 +0,0 @@ -# -# Copyright 2009-2017 by Vinay Sajip. See LICENSE.txt for details. 
-# -import logging -import unittest - -from logutils.adapter import LoggerAdapter -from logutils.dictconfig import dictConfig, named_handlers_supported -from logutils.testing import Matcher, TestHandler - -try: - StandardError -except NameError: - StandardError = Exception - - -class ExceptionFormatter(logging.Formatter): - """A special exception formatter.""" - - def formatException(self, ei): - return "Got a [%s]" % ei[0].__name__ - - -def formatFunc(format, datefmt=None): - return logging.Formatter(format, datefmt) - - -def testHandler(): - return TestHandler(Matcher()) - - -def handlerFunc(): - return logging.StreamHandler() - - -class CustomHandler(logging.StreamHandler): - pass - - -class ConfigDictTest(unittest.TestCase): - """Reading logging config from a dictionary.""" - - def setUp(self): - self.logger = temp_logger = logging.getLogger() - self.adapter = LoggerAdapter(temp_logger, {}) - - logger_dict = logging.getLogger().manager.loggerDict - logging._acquireLock() - try: - self.saved_handlers = logging._handlers.copy() - self.saved_handler_list = logging._handlerList[:] - self.saved_loggers = logger_dict.copy() - if hasattr(logging, '_levelNames'): - self.saved_level_names = logging._levelNames.copy() - else: - self.saved_level_to_name = logging._levelToName.copy() - self.saved_name_to_level = logging._nameToLevel.copy() - finally: - logging._releaseLock() - - self.root_logger = logging.getLogger("") - self.original_logging_level = self.root_logger.getEffectiveLevel() - - def tearDown(self): - self.root_logger.setLevel(self.original_logging_level) - logging._acquireLock() - try: - if hasattr(logging, '_levelNames'): - logging._levelNames.clear() - logging._levelNames.update(self.saved_level_names) - else: - logging._levelToName.clear() - logging._levelToName.update(self.saved_level_to_name) - logging._nameToLevel.clear() - logging._nameToLevel.update(self.saved_name_to_level) - logging._handlers.clear() - logging._handlers.update(self.saved_handlers) - 
logging._handlerList[:] = self.saved_handler_list - loggerDict = logging.getLogger().manager.loggerDict - loggerDict.clear() - loggerDict.update(self.saved_loggers) - finally: - logging._releaseLock() - - message_num = 0 - - def next_message(self): - """Generate a message consisting solely of an auto-incrementing - integer.""" - self.message_num += 1 - return "%d" % self.message_num - - # config0 is a standard configuration. - config0 = { - 'version': 1, - 'formatters': { - 'form1': { - 'format': '%(levelname)s ++ %(message)s', }, }, - 'handlers': {'hand1': { - '()': testHandler, - 'formatter': 'form1', }}, - 'root': { - 'level': 'WARNING', - 'handlers': ['hand1'], }, } - - # config1 adds a little to the standard configuration. - config1 = { - 'version': 1, - 'formatters': { - 'form1': { - 'format': '%(levelname)s ++ %(message)s', }, }, - 'handlers': {'hand1': { - '()': testHandler, - 'formatter': 'form1', }}, - 'loggers': { - 'compiler.parser': { - 'level': 'DEBUG', - 'handlers': ['hand1'], }, }, - 'root': { - 'level': 'WARNING', }, } - - # config2 has a subtle configuration error that should be reported - config2 = { - 'formatters': { - 'form1': { - 'format': '%(levelname)s ++ %(message)s', }, }, - 'handlers': { - 'hand1': { - 'class': 'logging.StreamHandler', - 'formatter': 'form1', - 'level': 'NOTSET', - 'stream': 'ext://sys.stdbout', }, }, - 'loggers': { - 'compiler.parser': { - 'level': 'DEBUG', - 'handlers': ['hand1'], }, }, - 'root': { - 'level': 'WARNING', }, } - - # As config1 but with a misspelt level on a handler - config2a = { - 'formatters': { - 'form1': { - 'format': '%(levelname)s ++ %(message)s', }, }, - 'handlers': { - 'hand1': { - 'class': 'logging.StreamHandler', - 'formatter': 'form1', - 'level': 'NTOSET', - 'stream': 'ext://sys.stdout', }, }, - 'loggers': { - 'compiler.parser': { - 'level': 'DEBUG', - 'handlers': ['hand1'], }, }, - 'root': { - 'level': 'WARNING', }, } - - # As config1 but with a misspelt level on a logger - config2b = { - 
'formatters': { - 'form1': { - 'format': '%(levelname)s ++ %(message)s', }, }, - 'handlers': { - 'hand1': { - 'class': 'logging.StreamHandler', - 'formatter': 'form1', - 'level': 'NOTSET', - 'stream': 'ext://sys.stdout', }, }, - 'loggers': { - 'compiler.parser': { - 'level': 'DEBUG', - 'handlers': ['hand1'], }, }, - 'root': { - 'level': 'WRANING', }, } - - # config3 has a less subtle configuration error - config3 = { - 'formatters': { - 'form1': { - 'format': '%(levelname)s ++ %(message)s', }, }, - 'handlers': { - 'hand1': { - 'class': 'logging.StreamHandler', - 'formatter': 'misspelled_name', - 'level': 'NOTSET', - 'stream': 'ext://sys.stdout', }, }, - 'loggers': { - 'compiler.parser': { - 'level': 'DEBUG', - 'handlers': ['hand1'], }, }, - 'root': { - 'level': 'WARNING', }, } - - # config4 specifies a custom formatter class to be loaded - config4 = { - 'version': 1, - 'formatters': { - 'form1': { - '()': __name__ + '.ExceptionFormatter', - 'format': '%(levelname)s:%(name)s:%(message)s', }, }, - 'handlers': {'hand1': { - '()': testHandler, - 'formatter': 'form1', }}, - 'root': { - 'level': 'NOTSET', - 'handlers': ['hand1'], }, } - - # As config4 but using an actual callable rather than a string - config4a = { - 'version': 1, - 'formatters': { - 'form1': { - '()': ExceptionFormatter, - 'format': '%(levelname)s:%(name)s:%(message)s', }, - 'form2': { - '()': __name__ + '.formatFunc', - 'format': '%(levelname)s:%(name)s:%(message)s', }, - 'form3': { - '()': formatFunc, - 'format': '%(levelname)s:%(name)s:%(message)s', }, }, - 'handlers': { - 'hand1': { - '()': testHandler, - 'formatter': 'form1', }, - 'hand2': { - '()': handlerFunc, }, }, - 'root': { - 'level': 'NOTSET', - 'handlers': ['hand1'], }, } - - # config5 specifies a custom handler class to be loaded - config5 = { - 'version': 1, - 'formatters': { - 'form1': { - 'format': '%(levelname)s ++ %(message)s', }, }, - 'handlers': {'hand1': { - '()': testHandler, - 'formatter': 'form1', }}, - 'loggers': { - 
'compiler.parser': { - 'level': 'DEBUG', - 'handlers': ['hand1'], }, }, - 'root': { - 'level': 'WARNING', }, } - - # config6 specifies a custom handler class to be loaded - # but has bad arguments - config6 = { - 'formatters': { - 'form1': { - 'format': '%(levelname)s ++ %(message)s', }, }, - 'handlers': { - 'hand1': { - 'class': __name__ + '.CustomHandler', - 'formatter': 'form1', - 'level': 'NOTSET', - 'stream': 'ext://sys.stdout', - '9': 'invalid parameter name', }, }, - 'loggers': { - 'compiler.parser': { - 'level': 'DEBUG', - 'handlers': ['hand1'], }, }, - 'root': { - 'level': 'WARNING', }, } - - # config 7 does not define compiler.parser but defines compiler.lexer - # so compiler.parser should be disabled after applying it - config7 = { - 'version': 1, - 'formatters': { - 'form1': { - 'format': '%(levelname)s ++ %(message)s', }, }, - 'handlers': {'hand1': { - '()': testHandler, - 'formatter': 'form1', }}, - 'loggers': { - 'compiler.lexer': { - 'level': 'DEBUG', - 'handlers': ['hand1'], }, }, - 'root': { - 'level': 'WARNING', }, } - - config8 = { - 'version': 1, - 'disable_existing_loggers': False, - 'formatters': { - 'form1': { - 'format': '%(levelname)s ++ %(message)s', }, }, - 'handlers': {'hand1': { - '()': testHandler, - 'formatter': 'form1', }}, - 'loggers': { - 'compiler': { - 'level': 'DEBUG', - 'handlers': ['hand1'], }, - 'compiler.lexer': {}, }, - 'root': { - 'level': 'WARNING', }, } - - config9 = { - 'version': 1, - 'formatters': { - 'form1': { - 'format': '%(levelname)s ++ %(message)s', }, }, - 'handlers': {'hand1': { - '()': testHandler, - 'formatter': 'form1', }}, - 'loggers': { - 'compiler.parser': { - 'level': 'WARNING', - 'handlers': ['hand1'], }, }, - 'root': { - 'level': 'NOTSET', }, } - - config9a = { - 'version': 1, - 'incremental': True, - 'handlers': { - 'hand1': { - 'level': 'WARNING', }, }, - 'loggers': { - 'compiler.parser': { - 'level': 'INFO', }, }, } - - config9b = { - 'version': 1, - 'incremental': True, - 'handlers': { - 'hand1': 
{ - 'level': 'INFO', }, }, - 'loggers': { - 'compiler.parser': { - 'level': 'INFO', }, }, } - - # As config1 but with a filter added - config10 = { - 'version': 1, - 'formatters': { - 'form1': { - 'format': '%(levelname)s ++ %(message)s', }, }, - 'filters': { - 'filt1': { - 'name': 'compiler.parser', }, }, - 'handlers': {'hand1': { - '()': testHandler, - 'formatter': 'form1', - 'filters': ['filt1'], }}, - 'loggers': { - 'compiler.parser': { - 'level': 'DEBUG', - 'filters': ['filt1'], }, }, - 'root': { - 'level': 'WARNING', - 'handlers': ['hand1'], }, } - - # As config10, but declaring a handler in a module using - # absolute imports - config11 = { - 'version': 1, - 'formatters': { - 'form1': { - 'format': '%(levelname)s ++ %(message)s', }, }, - 'filters': { - 'filt1': { - 'name': 'compiler.parser', }, }, - 'handlers': {'hand1': { - '()': 'mytest.MyTestHandler', - 'formatter': 'form1', - 'filters': ['filt1'], }}, - 'loggers': { - 'compiler.parser': { - 'level': 'DEBUG', - 'filters': ['filt1'], }, }, - 'root': { - 'level': 'WARNING', - 'handlers': ['hand1'], }, } - - def apply_config(self, conf): - dictConfig(conf) - - def test_config0_ok(self): - # A simple config which overrides the default settings. - self.apply_config(self.config0) - logger = logging.getLogger() - # Won't output anything - logger.info(self.next_message()) - # Outputs a message - logger.error(self.next_message()) - h = logger.handlers[0] - self.assertEqual(1, h.count) - self.assertTrue(h.matchall([dict(levelname='ERROR', message='2')])) - - def test_config1_ok(self, config=config1): - # A config defining a sub-parser as well. 
- self.apply_config(config) - logger = logging.getLogger("compiler.parser") - # Both will output a message - logger.info(self.next_message()) - logger.error(self.next_message()) - h = logger.handlers[0] - self.assertTrue(h.matchall([ - dict(levelname='INFO', message='1'), - dict(levelname='ERROR', message='2'), ])) - - def test_config2_failure(self): - # A simple config which overrides the default settings. - self.assertRaises(StandardError, self.apply_config, self.config2) - - def test_config2a_failure(self): - # A simple config which overrides the default settings. - self.assertRaises(StandardError, self.apply_config, self.config2a) - - def test_config2b_failure(self): - # A simple config which overrides the default settings. - self.assertRaises(StandardError, self.apply_config, self.config2b) - - def test_config3_failure(self): - # A simple config which overrides the default settings. - self.assertRaises(StandardError, self.apply_config, self.config3) - - def test_config4_ok(self): - # A config specifying a custom formatter class. - self.apply_config(self.config4) - logger = logging.getLogger() - h = logger.handlers[0] - try: - raise RuntimeError() - except RuntimeError: - logging.exception("just testing") - self.assertEquals(h.formatted[0], "ERROR:root:just testing\nGot a [RuntimeError]") - - def test_config4a_ok(self): - # A config specifying a custom formatter class. 
- self.apply_config(self.config4a) - logger = logging.getLogger() - h = logger.handlers[0] - try: - raise RuntimeError() - except RuntimeError: - logging.exception("just testing") - self.assertEquals(h.formatted[0], "ERROR:root:just testing\nGot a [RuntimeError]") - - def test_config5_ok(self): - self.test_config1_ok(config=self.config5) - - def test_config6_failure(self): - self.assertRaises(StandardError, self.apply_config, self.config6) - - def test_config7_ok(self): - self.apply_config(self.config1) - logger = logging.getLogger("compiler.parser") - # Both will output a message - logger.info(self.next_message()) - logger.error(self.next_message()) - h = logger.handlers[0] - self.assertTrue(h.matchall([ - dict(levelname='INFO', message='1'), - dict(levelname='ERROR', message='2'), ])) - self.apply_config(self.config7) - logger = logging.getLogger("compiler.parser") - self.assertTrue(logger.disabled) - logger = logging.getLogger("compiler.lexer") - # Both will output a message - h = logger.handlers[0] - logger.info(self.next_message()) - logger.error(self.next_message()) - self.assertTrue(h.matchall([ - dict(levelname='INFO', message='3'), - dict(levelname='ERROR', message='4'), ])) - - # Same as test_config_7_ok but don't disable old loggers. 
- def test_config_8_ok(self): - self.apply_config(self.config1) - logger = logging.getLogger("compiler.parser") - # Both will output a message - logger.info(self.next_message()) - logger.error(self.next_message()) - h = logger.handlers[0] - self.assertTrue(h.matchall([ - dict(levelname='INFO', message='1'), - dict(levelname='ERROR', message='2'), ])) - self.apply_config(self.config8) - logger = logging.getLogger("compiler.parser") - self.assertFalse(logger.disabled) - toplogger = logging.getLogger("compiler") - # Both will output a message - logger.info(self.next_message()) - logger.error(self.next_message()) - logger = logging.getLogger("compiler.lexer") - # Both will output a message - logger.info(self.next_message()) - logger.error(self.next_message()) - h = toplogger.handlers[0] - self.assertTrue( - h.matchall([ - dict(levelname='INFO', message='3'), - dict(levelname='ERROR', message='4'), - dict(levelname='INFO', message='5'), - dict(levelname='ERROR', message='6'), ])) - - def test_config_9_ok(self): - self.apply_config(self.config9) - logger = logging.getLogger("compiler.parser") - # Nothing will be output since both handler and logger are set to WARNING - logger.info(self.next_message()) - h = logger.handlers[0] - self.assertEqual(0, h.count) - self.apply_config(self.config9a) - # Nothing will be output since both handler is still set to WARNING - logger.info(self.next_message()) - h = logger.handlers[0] - nhs = named_handlers_supported() - if nhs: - self.assertEqual(0, h.count) - else: - self.assertEqual(1, h.count) - self.apply_config(self.config9b) - # Message should now be output - logger.info(self.next_message()) - if nhs: - h = logger.handlers[0] - self.assertTrue(h.matchall([ - dict(levelname='INFO', message='3'), ])) - else: - self.assertEqual(2, h.count) - - def test_config_10_ok(self): - self.apply_config(self.config10) - logger = logging.getLogger("compiler.parser") - logger.warning(self.next_message()) - logger = logging.getLogger('compiler') - 
# Not output, because filtered - logger.warning(self.next_message()) - logger = logging.getLogger('compiler.lexer') - # Not output, because filtered - logger.warning(self.next_message()) - logger = logging.getLogger("compiler.parser.codegen") - # Output, as not filtered - logger.error(self.next_message()) - h = logging.getLogger().handlers[0] - self.assertTrue(h.matchall([ - dict(levelname='WARNING', message='1'), - dict(levelname='ERROR', message='4'), ])) - - def test_config_11_ok(self): - self.apply_config(self.config11) - h = logging.getLogger().handlers[0] - self.assertEqual(h.__module__, 'mytest') - self.assertEqual(h.__class__.__name__, 'MyTestHandler') diff --git a/splunk_eventgen/lib/logutils_src/tests/test_formatter.py b/splunk_eventgen/lib/logutils_src/tests/test_formatter.py deleted file mode 100644 index 011ba234..00000000 --- a/splunk_eventgen/lib/logutils_src/tests/test_formatter.py +++ /dev/null @@ -1,74 +0,0 @@ -# -# Copyright (C) 2009-2017 Vinay Sajip. See LICENSE.txt for details. 
-# -import logging -import os -import sys -import unittest - -import logutils - - -class FormatterTest(unittest.TestCase): - def setUp(self): - self.common = { - 'name': 'formatter.test', - 'level': logging.DEBUG, - 'pathname': os.path.join('path', 'to', 'dummy.ext'), - 'lineno': 42, - 'exc_info': None, - 'func': None, - 'msg': 'Message with %d %s', - 'args': (2, 'placeholders'), } - self.variants = {} - - def get_record(self, name=None): - result = dict(self.common) - if name is not None: - result.update(self.variants[name]) - return logging.makeLogRecord(result) - - def test_percent(self): - "Test %-formatting" - r = self.get_record() - f = logutils.Formatter('${%(message)s}') - self.assertEqual(f.format(r), '${Message with 2 placeholders}') - f = logutils.Formatter('%(random)s') - self.assertRaises(KeyError, f.format, r) - self.assertFalse(f.usesTime()) - f = logutils.Formatter('%(asctime)s') - self.assertTrue(f.usesTime()) - f = logutils.Formatter('asctime') - self.assertFalse(f.usesTime()) - - if sys.version_info[:2] >= (2, 6): - - def test_braces(self): - "Test {}-formatting" - r = self.get_record() - f = logutils.Formatter('$%{message}%$', style='{') - self.assertEqual(f.format(r), '$%Message with 2 placeholders%$') - f = logutils.Formatter('{random}', style='{') - self.assertRaises(KeyError, f.format, r) - self.assertFalse(f.usesTime()) - f = logutils.Formatter('{asctime}', style='{') - self.assertTrue(f.usesTime()) - f = logutils.Formatter('asctime', style='{') - self.assertFalse(f.usesTime()) - - def test_dollars(self): - "Test $-formatting" - r = self.get_record() - f = logutils.Formatter('$message', style='$') - self.assertEqual(f.format(r), 'Message with 2 placeholders') - f = logutils.Formatter('$$%${message}%$$', style='$') - self.assertEqual(f.format(r), '$%Message with 2 placeholders%$') - f = logutils.Formatter('${random}', style='$') - self.assertRaises(KeyError, f.format, r) - self.assertFalse(f.usesTime()) - f = logutils.Formatter('${asctime}', 
style='$') - self.assertTrue(f.usesTime()) - f = logutils.Formatter('$asctime', style='$') - self.assertTrue(f.usesTime()) - f = logutils.Formatter('asctime', style='$') - self.assertFalse(f.usesTime()) diff --git a/splunk_eventgen/lib/logutils_src/tests/test_messages.py b/splunk_eventgen/lib/logutils_src/tests/test_messages.py deleted file mode 100644 index 0a221105..00000000 --- a/splunk_eventgen/lib/logutils_src/tests/test_messages.py +++ /dev/null @@ -1,34 +0,0 @@ -import sys -import unittest - -import logutils - - -class MessageTest(unittest.TestCase): - if sys.version_info[:2] >= (2, 6): - - def test_braces(self): - "Test whether brace-formatting works." - __ = logutils.BraceMessage - m = __('Message with {0} {1}', 2, 'placeholders') - self.assertEqual(str(m), 'Message with 2 placeholders') - m = __('Message with {0:d} {1}', 2, 'placeholders') - self.assertEqual(str(m), 'Message with 2 placeholders') - m = __('Message without {0:x} {1}', 16, 'placeholders') - self.assertEqual(str(m), 'Message without 10 placeholders') - - class Dummy: - pass - - dummy = Dummy() - dummy.x, dummy.y = 0.0, 1.0 - m = __('Message with coordinates: ({point.x:.2f}, {point.y:.2f})', point=dummy) - self.assertEqual(str(m), 'Message with coordinates: (0.00, 1.00)') - - def test_dollars(self): - "Test whether dollar-formatting works." - __ = logutils.DollarMessage - m = __('Message with $num ${what}', num=2, what='placeholders') - self.assertEqual(str(m), 'Message with 2 placeholders') - ignored = object() - self.assertRaises(TypeError, __, 'Message with $num ${what}', ignored, num=2, what='placeholders') diff --git a/splunk_eventgen/lib/logutils_src/tests/test_queue.py b/splunk_eventgen/lib/logutils_src/tests/test_queue.py deleted file mode 100644 index f85074c6..00000000 --- a/splunk_eventgen/lib/logutils_src/tests/test_queue.py +++ /dev/null @@ -1,69 +0,0 @@ -# -# Copyright (C) 2010-2017 Vinay Sajip. See LICENSE.txt for details. 
-# -import logging -import unittest - -from logutils.queue import QueueHandler, QueueListener, queue -from logutils.testing import Matcher, TestHandler - - -class QueueTest(unittest.TestCase): - def setUp(self): - self.handler = h = TestHandler(Matcher()) - self.logger = temp_logger = logging.getLogger() - self.queue = q = queue.Queue(-1) - self.qh = qh = QueueHandler(q) - self.ql = ql = QueueListener(q, h) - ql.start() - temp_logger.addHandler(qh) - - def tearDown(self): - self.logger.removeHandler(self.qh) - self.qh.close() - self.handler.close() - - def test_simple(self): - "Simple test of queue handling and listening." - # Just as a demo, let's log some messages. - # Only one should show up in the log. - self.logger.debug("This won't show up.") - self.logger.info("Neither will this.") - self.logger.warning("But this will.") - self.ql.stop() # ensure all records have come through. - h = self.handler - self.assertTrue(h.matches(levelno=logging.WARNING)) - self.assertFalse(h.matches(levelno=logging.DEBUG)) - self.assertFalse(h.matches(levelno=logging.INFO)) - - def test_partial(self): - "Test of partial matching through queues." - # Just as a demo, let's log some messages. - # Only one should show up in the log. - self.logger.debug("This won't show up.") - self.logger.info("Neither will this.") - self.logger.warning("But this will.") - self.ql.stop() # ensure all records have come through. - h = self.handler - self.assertTrue(h.matches(msg="ut th")) # from "But this will" - self.assertTrue(h.matches(message="ut th")) # from "But this will" - self.assertFalse(h.matches(message="either")) - self.assertFalse(h.matches(message="won't")) - - def test_multiple(self): - "Test of matching multiple values through queues." - # Just as a demo, let's log some messages. - # Only one should show up in the log. 
- self.logger.debug("This won't show up.") - self.logger.info("Neither will this.") - self.logger.warning("But this will.") - self.logger.error("And so will this.") - self.ql.stop() # ensure all records have come through. - h = self.handler - self.assertTrue(h.matches(levelno=logging.WARNING, message='ut thi')) - self.assertTrue(h.matches(levelno=logging.ERROR, message='nd so wi')) - self.assertFalse(h.matches(levelno=logging.INFO)) - - -if __name__ == '__main__': - unittest.main() diff --git a/splunk_eventgen/lib/logutils_src/tests/test_redis.py b/splunk_eventgen/lib/logutils_src/tests/test_redis.py deleted file mode 100644 index e53bdb5d..00000000 --- a/splunk_eventgen/lib/logutils_src/tests/test_redis.py +++ /dev/null @@ -1,98 +0,0 @@ -# -# Copyright (C) 2011-2017 Vinay Sajip. See LICENSE.txt for details. -# -import logging -import socket -import subprocess -import time -import unittest - -from logutils.redis import RedisQueueHandler, RedisQueueListener -from logutils.testing import Matcher, TestHandler - -from redis import Redis - - -class QueueListener(RedisQueueListener): - def dequeue(self, block): - record = RedisQueueListener.dequeue(self, block) - if record: - record = logging.makeLogRecord(record) - return record - - -class RedisQueueTest(unittest.TestCase): - def setUp(self): - self.handler = h = TestHandler(Matcher()) - self.logger = temp_logger = logging.getLogger() - self.server = subprocess.Popen(['redis-server'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - self.wait_for_server() - self.queue = q = Redis() - self.qh = qh = RedisQueueHandler(redis=q) - self.ql = ql = QueueListener(h, redis=q) - ql.start() - temp_logger.addHandler(qh) - - def tearDown(self): - self.logger.removeHandler(self.qh) - self.qh.close() - self.handler.close() - self.server.terminate() - - def wait_for_server(self): - maxtime = time.time() + 2 # 2 seconds to wait for server - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - while time.time() < maxtime: - try: - 
sock.connect(('localhost', 6379)) - break - except socket.error: - pass - if time.time() >= maxtime: - raise Exception('unable to connect to Redis server') - sock.close() - - def test_simple(self): - "Simple test of queue handling and listening." - # Just as a demo, let's log some messages. - # Only one should show up in the log. - self.logger.debug("This won't show up.") - self.logger.info("Neither will this.") - self.logger.warning("But this will.") - self.ql.stop() # ensure all records have come through. - h = self.handler - self.assertTrue(h.matches(levelno=logging.WARNING)) - self.assertFalse(h.matches(levelno=logging.DEBUG)) - self.assertFalse(h.matches(levelno=logging.INFO)) - - def test_partial(self): - "Test of partial matching through queues." - # Just as a demo, let's log some messages. - # Only one should show up in the log. - self.logger.debug("This won't show up.") - self.logger.info("Neither will this.") - self.logger.warning("But this will.") - self.ql.stop() # ensure all records have come through. - h = self.handler - self.assertTrue(h.matches(msg="ut th")) # from "But this will" - self.assertTrue(h.matches(message="ut th")) # from "But this will" - self.assertFalse(h.matches(message="either")) - self.assertFalse(h.matches(message="won't")) - - def test_multiple(self): - "Test of matching multiple values through queues." - # Just as a demo, let's log some messages. - # Only one should show up in the log. - self.logger.debug("This won't show up.") - self.logger.info("Neither will this.") - self.logger.warning("But this will.") - self.logger.error("And so will this.") - self.ql.stop() # ensure all records have come through. 
- h = self.handler - self.assertTrue(h.matches(levelno=logging.WARNING, message='ut thi')) - self.assertTrue(h.matches(levelno=logging.ERROR, message='nd so wi')) - self.assertFalse(h.matches(levelno=logging.INFO)) - - -if __name__ == '__main__': - unittest.main() diff --git a/splunk_eventgen/lib/logutils_src/tests/test_testing.py b/splunk_eventgen/lib/logutils_src/tests/test_testing.py deleted file mode 100644 index ef61beb7..00000000 --- a/splunk_eventgen/lib/logutils_src/tests/test_testing.py +++ /dev/null @@ -1,60 +0,0 @@ -# -# Copyright (C) 2010-2017 Vinay Sajip. See LICENSE.txt for details. -# -import logging -import unittest - -from logutils.testing import Matcher, TestHandler - - -class LoggingTest(unittest.TestCase): - def setUp(self): - self.handler = h = TestHandler(Matcher()) - self.logger = temp_logger = logging.getLogger() - temp_logger.addHandler(h) - - def tearDown(self): - self.logger.removeHandler(self.handler) - self.handler.close() - - def test_simple(self): - """Simple test of logging test harness.""" - # Just as a demo, let's log some messages. - # Only one should show up in the log. - self.logger.debug("This won't show up.") - self.logger.info("Neither will this.") - self.logger.warning("But this will.") - h = self.handler - self.assertTrue(h.matches(levelno=logging.WARNING)) - self.assertFalse(h.matches(levelno=logging.DEBUG)) - self.assertFalse(h.matches(levelno=logging.INFO)) - - def test_partial(self): - """Test of partial matching in logging test harness.""" - # Just as a demo, let's log some messages. - # Only one should show up in the log. 
- self.logger.debug("This won't show up.") - self.logger.info("Neither will this.") - self.logger.warning("But this will.") - h = self.handler - self.assertTrue(h.matches(msg="ut th")) # from "But this will" - self.assertTrue(h.matches(message="ut th")) # from "But this will" - self.assertFalse(h.matches(message="either")) - self.assertFalse(h.matches(message="won't")) - - def test_multiple(self): - """Test of matching multiple values in logging test harness.""" - # Just as a demo, let's log some messages. - # Only one should show up in the log. - self.logger.debug("This won't show up.") - self.logger.info("Neither will this.") - self.logger.warning("But this will.") - self.logger.error("And so will this.") - h = self.handler - self.assertTrue(h.matches(levelno=logging.WARNING, message='ut thi')) - self.assertTrue(h.matches(levelno=logging.ERROR, message='nd so wi')) - self.assertFalse(h.matches(levelno=logging.INFO)) - - -if __name__ == '__main__': - unittest.main() diff --git a/splunk_eventgen/lib/outputcounter.py b/splunk_eventgen/lib/outputcounter.py index 36f0ff4c..e8f8872a 100644 --- a/splunk_eventgen/lib/outputcounter.py +++ b/splunk_eventgen/lib/outputcounter.py @@ -1,6 +1,6 @@ import time -from logging_config import logger +from splunk_eventgen.lib.logging_config import logger class OutputCounter(object): diff --git a/splunk_eventgen/lib/outputplugin.py b/splunk_eventgen/lib/outputplugin.py index e6429761..663042dc 100644 --- a/splunk_eventgen/lib/outputplugin.py +++ b/splunk_eventgen/lib/outputplugin.py @@ -1,7 +1,5 @@ -from __future__ import division - from collections import deque -from logging_config import logger +from splunk_eventgen.lib.logging_config import logger class OutputPlugin(object): diff --git a/splunk_eventgen/lib/plugins/__init__.py b/splunk_eventgen/lib/plugins/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/splunk_eventgen/lib/plugins/generator/__init__.py b/splunk_eventgen/lib/plugins/generator/__init__.py 
deleted file mode 100644 index e69de29b..00000000 diff --git a/splunk_eventgen/lib/plugins/generator/default.py b/splunk_eventgen/lib/plugins/generator/default.py index fb37e76f..7495476b 100644 --- a/splunk_eventgen/lib/plugins/generator/default.py +++ b/splunk_eventgen/lib/plugins/generator/default.py @@ -1,13 +1,10 @@ # TODO: Sample object is incredibly overloaded and not threadsafe. Need to make it simpler to get a copy without the # whole object get a copy of whats needed without the whole object. - -from __future__ import division - import datetime import random -from generatorplugin import GeneratorPlugin -from logging_config import logger +from splunk_eventgen.lib.generatorplugin import GeneratorPlugin +from splunk_eventgen.lib.logging_config import logger class DefaultGenerator(GeneratorPlugin): @@ -38,7 +35,7 @@ def gen(self, count, earliest, latest, samplename=None): logger.debug( "Bundlelines, filling eventsDict for sample '%s' in app '%s' with %d copies of sampleDict" % (self._sample.name, self._sample.app, count)) - for x in xrange(count): + for x in range(count): eventsDict.extend(self._sample.sampleDict) # Otherwise fill count events into eventsDict or keep making copies of events out of sampleDict until diff --git a/splunk_eventgen/lib/plugins/generator/jinja.py b/splunk_eventgen/lib/plugins/generator/jinja.py index d48c2416..c2d8d608 100644 --- a/splunk_eventgen/lib/plugins/generator/jinja.py +++ b/splunk_eventgen/lib/plugins/generator/jinja.py @@ -1,5 +1,3 @@ -from __future__ import division - import datetime import os import random @@ -8,11 +6,11 @@ from jinja2 import nodes from jinja2.ext import Extension -from generatorplugin import GeneratorPlugin -from logging_config import logger +from splunk_eventgen.lib.generatorplugin import GeneratorPlugin +from splunk_eventgen.lib.logging_config import logger try: import ujson as json -except: +except ImportError: import json as json @@ -111,7 +109,7 @@ def parse(self, parser): target_var_name = 
{"time_now": "time_now", "time_slice": "time_target"} tag = parser.stream.current.value name_base = target_var_name[tag] - lineno = parser.stream.next().lineno + lineno = next(parser.stream).lineno args, kwargs = self.parse_args(parser) task_list = [] epoch_name = name_base + "_epoch" @@ -259,7 +257,7 @@ def gen(self, count, earliest, latest, samplename=None): "Please note, you must meet the requirements for json.loads in python if you have" + "not installed ujson. Native python does not support multi-line events.") continue - current_line_keys = target_line.keys() + current_line_keys = list(target_line.keys()) if "_time" not in current_line_keys: # TODO: Add a custom exception here raise Exception("No _time field supplied, please add time to your jinja template.") diff --git a/splunk_eventgen/lib/plugins/generator/perdayvolumegenerator.py b/splunk_eventgen/lib/plugins/generator/perdayvolumegenerator.py index 7b84845d..8a7fdd5e 100644 --- a/splunk_eventgen/lib/plugins/generator/perdayvolumegenerator.py +++ b/splunk_eventgen/lib/plugins/generator/perdayvolumegenerator.py @@ -1,10 +1,8 @@ -from __future__ import division - import datetime import random -from generatorplugin import GeneratorPlugin -from logging_config import logger +from splunk_eventgen.lib.generatorplugin import GeneratorPlugin +from splunk_eventgen.lib.logging_config import logger class PerDayVolumeGenerator(GeneratorPlugin): diff --git a/splunk_eventgen/lib/plugins/generator/replay.py b/splunk_eventgen/lib/plugins/generator/replay.py index d8c68a49..7cb7092d 100644 --- a/splunk_eventgen/lib/plugins/generator/replay.py +++ b/splunk_eventgen/lib/plugins/generator/replay.py @@ -1,13 +1,10 @@ # TODO Add timestamp detection for common timestamp format - -from __future__ import division - import datetime import time -from eventgentimestamp import EventgenTimestamp -from generatorplugin import GeneratorPlugin -from logging_config import logger +from splunk_eventgen.lib.eventgentimestamp import 
EventgenTimestamp +from splunk_eventgen.lib.generatorplugin import GeneratorPlugin +from splunk_eventgen.lib.logging_config import logger class ReplayGenerator(GeneratorPlugin): diff --git a/splunk_eventgen/lib/plugins/generator/weblog.py b/splunk_eventgen/lib/plugins/generator/weblog.py index b6c0e841..e61c9664 100755 --- a/splunk_eventgen/lib/plugins/generator/weblog.py +++ b/splunk_eventgen/lib/plugins/generator/weblog.py @@ -1,9 +1,7 @@ -from __future__ import division - import random import time -from generatorplugin import GeneratorPlugin +from splunk_eventgen.lib.generatorplugin import GeneratorPlugin class WeblogGenerator(GeneratorPlugin): @@ -45,7 +43,7 @@ def gen(self, count, earliest, latest, **kwargs): self._sample.sourcetype, 'host': self._sample.host, 'source': self._sample.source, '_time': - int(time.mktime(latest.timetuple()))} for i in xrange(count)] + int(time.mktime(latest.timetuple()))} for i in range(count)] self._out.bulksend(payload) return 0 diff --git a/splunk_eventgen/lib/plugins/generator/windbag.py b/splunk_eventgen/lib/plugins/generator/windbag.py index 6b0965b9..952f2910 100644 --- a/splunk_eventgen/lib/plugins/generator/windbag.py +++ b/splunk_eventgen/lib/plugins/generator/windbag.py @@ -1,9 +1,8 @@ -from __future__ import division import datetime from datetime import timedelta -from generatorplugin import GeneratorPlugin -from logging_config import logger +from splunk_eventgen.lib.generatorplugin import GeneratorPlugin +from splunk_eventgen.lib.logging_config import logger class WindbagGenerator(GeneratorPlugin): @@ -15,7 +14,7 @@ def gen(self, count, earliest, latest, samplename=None): logger.warning('Sample size not found for count=-1 and generator=windbag, defaulting to count=60') count = 60 time_interval = timedelta.total_seconds((latest - earliest)) / count - for i in xrange(count): + for i in range(count): current_time_object = earliest + datetime.timedelta(0, time_interval * (i + 1)) msg = '{0} -0700 WINDBAG Event {1} of 
{2}'.format(current_time_object, (i + 1), count) self._out.send(msg) diff --git a/splunk_eventgen/lib/plugins/output/__init__.py b/splunk_eventgen/lib/plugins/output/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/splunk_eventgen/lib/plugins/output/awss3.py b/splunk_eventgen/lib/plugins/output/awss3.py index 7a2b4104..11805695 100644 --- a/splunk_eventgen/lib/plugins/output/awss3.py +++ b/splunk_eventgen/lib/plugins/output/awss3.py @@ -1,13 +1,11 @@ -from __future__ import division - import datetime import threading import uuid import requests -from outputplugin import OutputPlugin -from logging_config import logger +from splunk_eventgen.lib.outputplugin import OutputPlugin +from splunk_eventgen.lib.logging_config import logger try: import boto3 @@ -127,9 +125,9 @@ def _transmitEvents(self, payloadstring): uuid.uuid1()) + self.awsS3objectsuffix logger.debug("Uploading %d events into s3 key: %s " % (len(records), s3keyname)) if self.awsS3compressiontype == 'gz': - import StringIO + import io import gzip - out = StringIO.StringIO() + out = io.StringIO() with gzip.GzipFile(fileobj=out, mode="w") as f: f.write(records) records = out.getvalue() diff --git a/splunk_eventgen/lib/plugins/output/counter.py b/splunk_eventgen/lib/plugins/output/counter.py index 35971428..8ee173e6 100755 --- a/splunk_eventgen/lib/plugins/output/counter.py +++ b/splunk_eventgen/lib/plugins/output/counter.py @@ -2,7 +2,7 @@ import pprint import sys -from outputplugin import OutputPlugin +from splunk_eventgen.lib.outputplugin import OutputPlugin class CounterOutputPlugin(OutputPlugin): @@ -37,7 +37,7 @@ def _output_end(self): CounterOutputPlugin.lastPrintAt = CounterOutputPlugin.flushCount def _print_info(self, msg): - print >> sys.stderr, '{} {}'.format(datetime.datetime.now(), msg) + print('{} {}'.format(datetime.datetime.now(), msg), file=sys.stderr) def load(): diff --git a/splunk_eventgen/lib/plugins/output/devnull.py b/splunk_eventgen/lib/plugins/output/devnull.py 
index 1835157b..da6d763f 100755 --- a/splunk_eventgen/lib/plugins/output/devnull.py +++ b/splunk_eventgen/lib/plugins/output/devnull.py @@ -1,6 +1,4 @@ -from __future__ import division - -from outputplugin import OutputPlugin +from splunk_eventgen.lib.outputplugin import OutputPlugin class DevNullOutputPlugin(OutputPlugin): diff --git a/splunk_eventgen/lib/plugins/output/file.py b/splunk_eventgen/lib/plugins/output/file.py index 29a4854f..7bd09ab2 100644 --- a/splunk_eventgen/lib/plugins/output/file.py +++ b/splunk_eventgen/lib/plugins/output/file.py @@ -1,11 +1,7 @@ # Note as implemented this plugin is not threadsafe, file should only be used with one output worker - -from __future__ import division - import os - -from outputplugin import OutputPlugin -from logging_config import logger +from splunk_eventgen.lib.outputplugin import OutputPlugin +from splunk_eventgen.lib.logging_config import logger class FileOutputPlugin(OutputPlugin): diff --git a/splunk_eventgen/lib/plugins/output/httpevent.py b/splunk_eventgen/lib/plugins/output/httpevent.py index 699b5f76..704e26d2 100644 --- a/splunk_eventgen/lib/plugins/output/httpevent.py +++ b/splunk_eventgen/lib/plugins/output/httpevent.py @@ -1,18 +1,16 @@ -from __future__ import division - -from httpevent_core import HTTPCoreOutputPlugin -from logging_config import logger +from splunk_eventgen.lib.plugins.output.httpevent_core import HTTPCoreOutputPlugin +from splunk_eventgen.lib.logging_config import logger try: import requests from requests import Session from requests_futures.sessions import FuturesSession from concurrent.futures import ThreadPoolExecutor -except ImportError: +except: pass try: import ujson as json -except: +except ImportError: import json diff --git a/splunk_eventgen/lib/plugins/output/httpevent_core.py b/splunk_eventgen/lib/plugins/output/httpevent_core.py index ba2a0b92..56eff9c8 100644 --- a/splunk_eventgen/lib/plugins/output/httpevent_core.py +++ 
b/splunk_eventgen/lib/plugins/output/httpevent_core.py @@ -1,22 +1,21 @@ -from __future__ import division - -import logging import random -import urllib +import urllib.request +import urllib.parse +import urllib.error -from outputplugin import OutputPlugin -from logging_config import logger +from splunk_eventgen.lib.outputplugin import OutputPlugin +from splunk_eventgen.lib.logging_config import logger try: import requests from requests import Session from requests_futures.sessions import FuturesSession from concurrent.futures import ThreadPoolExecutor -except ImportError: +except: pass try: import ujson as json -except: +except ImportError: import json @@ -59,7 +58,7 @@ def _urlencode(value): :param value: string :return: urlencoded string ''' - return urllib.quote(value) + return urllib.parse.quote(value) @staticmethod def _bg_convert_json(sess, resp): diff --git a/splunk_eventgen/lib/plugins/output/metric_httpevent.py b/splunk_eventgen/lib/plugins/output/metric_httpevent.py index d6ee47c2..04dcf74e 100644 --- a/splunk_eventgen/lib/plugins/output/metric_httpevent.py +++ b/splunk_eventgen/lib/plugins/output/metric_httpevent.py @@ -1,18 +1,9 @@ -from __future__ import division +from splunk_eventgen.lib.plugins.output.httpevent_core import HTTPCoreOutputPlugin +from splunk_eventgen.lib.logging_config import logger -from httpevent_core import HTTPCoreOutputPlugin -from logging_config import logger - -try: - import requests - from requests import Session - from requests_futures.sessions import FuturesSession - from concurrent.futures import ThreadPoolExecutor -except ImportError: - pass try: import ujson as json -except: +except ImportError: import json diff --git a/splunk_eventgen/lib/plugins/output/modinput.py b/splunk_eventgen/lib/plugins/output/modinput.py index 43f5658f..1f1ecfdc 100644 --- a/splunk_eventgen/lib/plugins/output/modinput.py +++ b/splunk_eventgen/lib/plugins/output/modinput.py @@ -1,14 +1,7 @@ -# import sys, os -# path_prepend = 
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -# sys.path.append(path_prepend) -# from eventgenoutputtemplates import OutputTemplate - -from __future__ import division - import sys from xml.sax.saxutils import escape -from outputplugin import OutputPlugin +from splunk_eventgen.lib.outputplugin import OutputPlugin class ModInputOutputPlugin(OutputPlugin): diff --git a/splunk_eventgen/lib/plugins/output/s2s.py b/splunk_eventgen/lib/plugins/output/s2s.py index 28203f28..7820ab32 100644 --- a/splunk_eventgen/lib/plugins/output/s2s.py +++ b/splunk_eventgen/lib/plugins/output/s2s.py @@ -1,9 +1,7 @@ -from __future__ import division - import socket import struct -from outputplugin import OutputPlugin +from splunk_eventgen.lib.outputplugin import OutputPlugin class S2S: @@ -35,7 +33,7 @@ def _open_connection(self, host='localhost', port=9997): self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.s.connect((host, int(port))) - def _encode_sig(self, serverName='s2s-api', mgmtPort='9997'): + def _encode_sig(self, serverName='s2s-api'.encode('utf-8'), mgmtPort='9997'.encode('utf-8')): """ Create Signature element of the S2S Message. Signature is C struct: @@ -48,7 +46,7 @@ def _encode_sig(self, serverName='s2s-api', mgmtPort='9997'): """ if not self.signature_sent: self.signature_sent = True - return struct.pack('!128s256s16s', '--splunk-cooked-mode-v2--', serverName, mgmtPort) + return struct.pack('!128s256s16s', '--splunk-cooked-mode-v2--'.encode('utf-8'), serverName, mgmtPort).decode('utf-8') else: return '' @@ -59,8 +57,8 @@ def _encode_string(self, tosend=''): Wire protocol has an unsigned integer of the length of the string followed by a null terminated string. 
""" - tosend = str(tosend) - return struct.pack('!I%ds' % (len(tosend) + 1), len(tosend) + 1, tosend) + tosend = str(tosend).encode('utf-8') + return struct.pack('!I%ds' % (len(tosend) + 1), len(tosend) + 1, tosend).decode('utf-8') def _encode_key_value(self, key='', value=''): """ @@ -131,9 +129,9 @@ def _encode_event(self, index='main', host='', source='', sourcetype='', _raw='_ # Create buffer, starting with the signature buf = sig # Add 32 bit integer with the size of the msg, calculated earlier - buf += struct.pack('!I', msg_size) + buf += struct.pack('!I', msg_size).decode('utf-8') # Add number of map entries, which is 5, index, host, source, sourcetype, raw - buf += struct.pack('!I', maps) + buf += struct.pack('!I', maps).decode('utf-8') # Add the map entries, index, source, sourcetype, host, raw buf += encoded_index buf += encoded_host if encoded_host else '' @@ -143,7 +141,7 @@ def _encode_event(self, index='main', host='', source='', sourcetype='', _raw='_ buf += encoded_done buf += encoded_raw # Add dummy zero - buf += struct.pack('!I', 0) + buf += struct.pack('!I', 0).decode('utf-8') # Add trailer raw buf += encoded_raw_trailer return buf @@ -154,7 +152,7 @@ def send_event(self, index='main', host='', source='', sourcetype='', _raw='', _ """ if len(_raw) > 0: e = self._encode_event(index, host, source, sourcetype, _raw, _time) - self.s.sendall(e) + self.s.sendall(e.encode('utf-8')) def close(self): """ diff --git a/splunk_eventgen/lib/plugins/output/splunkstream.py b/splunk_eventgen/lib/plugins/output/splunkstream.py index f13ba713..35eaf033 100644 --- a/splunk_eventgen/lib/plugins/output/splunkstream.py +++ b/splunk_eventgen/lib/plugins/output/splunkstream.py @@ -1,14 +1,14 @@ -from __future__ import division - -import httplib -import urllib +import http.client +import urllib.request +import urllib.parse +import urllib.error from collections import deque from xml.dom import minidom import httplib2 -from outputplugin import OutputPlugin -from 
logging_config import logger +from splunk_eventgen.lib.outputplugin import OutputPlugin +from splunk_eventgen.lib.logging_config import logger class SplunkStreamOutputPlugin(OutputPlugin): @@ -30,20 +30,24 @@ class SplunkStreamOutputPlugin(OutputPlugin): def __init__(self, sample, output_counter=None): OutputPlugin.__init__(self, sample, output_counter) - from eventgenconfig import Config + from splunk_eventgen.lib.eventgenconfig import Config globals()['c'] = Config() self._splunkUrl, self._splunkMethod, self._splunkHost, self._splunkPort = c.getSplunkUrl(self._sample) # noqa self._splunkUser = self._sample.splunkUser self._splunkPass = self._sample.splunkPass + # Cancel SSL verification + import ssl + ssl._create_default_https_context = ssl._create_unverified_context + if not self._sample.sessionKey: try: myhttp = httplib2.Http(disable_ssl_certificate_validation=True) logger.debug("Getting session key from '%s' with user '%s' and pass '%s'" % (self._splunkUrl + '/services/auth/login', self._splunkUser, self._splunkPass)) response = myhttp.request( - self._splunkUrl + '/services/auth/login', 'POST', headers={}, body=urllib.urlencode({ + self._splunkUrl + '/services/auth/login', 'POST', headers={}, body=urllib.parse.urlencode({ 'username': self._splunkUser, 'password': self._splunkPass}))[1] @@ -73,7 +77,7 @@ def flush(self, q): queues[row['source'] + '_' + row['sourcetype']].append(row) # Iterate sub-queues, each holds events for a specific source/sourcetype combo - for k, queue in queues.items(): + for k, queue in list(queues.items()): if len(queue) > 0: streamout = "" index = source = sourcetype = host = hostRegex = None @@ -93,9 +97,9 @@ def flush(self, q): (self._sample.name, self._app, self._sample.source)) try: if self._splunkMethod == 'https': - connmethod = httplib.HTTPSConnection + connmethod = http.client.HTTPSConnection else: - connmethod = httplib.HTTPConnection + connmethod = http.client.HTTPConnection splunkhttp = connmethod(self._splunkHost, 
self._splunkPort) splunkhttp.connect() urlparams = [] @@ -109,7 +113,7 @@ def flush(self, q): urlparams.append(('host_regex', hostRegex)) if host: urlparams.append(('host', host)) - url = '/services/receivers/simple?%s' % (urllib.urlencode(urlparams)) + url = '/services/receivers/simple?%s' % (urllib.parse.urlencode(urlparams)) headers = {'Authorization': "Splunk %s" % self._sample.sessionKey} # Iterate each raw event string in its sub-queue @@ -127,7 +131,7 @@ def flush(self, q): "POSTing to url %s on %s://%s:%s with sessionKey %s" % (url, self._splunkMethod, self._splunkHost, self._splunkPort, self._sample.sessionKey)) - except httplib.HTTPException, e: + except http.client.HTTPException as e: logger.error( 'Error connecting to Splunk for logging for sample %s. Exception "%s" Config: %s' % (self._sample.name, e.args, self)) @@ -138,7 +142,7 @@ def flush(self, q): data = response.read() if response.status != 200: logger.error("Data not written to Splunk. Splunk returned %s" % data) - except httplib.BadStatusLine: + except http.client.BadStatusLine: logger.error("Received bad status from Splunk for sample '%s'" % self._sample) logger.debug("Closing splunkhttp connection") if splunkhttp: diff --git a/splunk_eventgen/lib/plugins/output/spool.py b/splunk_eventgen/lib/plugins/output/spool.py index acda08aa..c5a0e3fb 100644 --- a/splunk_eventgen/lib/plugins/output/spool.py +++ b/splunk_eventgen/lib/plugins/output/spool.py @@ -1,15 +1,8 @@ -# import sys, os -# path_prepend = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -# sys.path.append(path_prepend) -# from eventgenoutputtemplates import OutputTemplate - -from __future__ import division - import os import time -from outputplugin import OutputPlugin -from logging_config import logger +from splunk_eventgen.lib.outputplugin import OutputPlugin +from splunk_eventgen.lib.logging_config import logger class SpoolOutputPlugin(OutputPlugin): @@ -40,7 +33,8 @@ def flush(self, q): with 
open(self.spoolPath, 'a') as dst: dst.write(data) break - except: + except Exception as e: + logger.error(str(e)) time.sleep(0.1) logger.debug("Queue for app '%s' sample '%s' written" % (self._app, self._sample.name)) diff --git a/splunk_eventgen/lib/plugins/output/stdout.py b/splunk_eventgen/lib/plugins/output/stdout.py index 3c285e4c..54734b5c 100644 --- a/splunk_eventgen/lib/plugins/output/stdout.py +++ b/splunk_eventgen/lib/plugins/output/stdout.py @@ -1,6 +1,4 @@ -from __future__ import division - -from outputplugin import OutputPlugin +from splunk_eventgen.lib.outputplugin import OutputPlugin class StdOutOutputPlugin(OutputPlugin): @@ -13,7 +11,7 @@ def __init__(self, sample, output_counter=None): def flush(self, q): for x in q: - print x['_raw'].rstrip() + print(x['_raw'].rstrip()) def load(): diff --git a/splunk_eventgen/lib/plugins/output/syslogout.py b/splunk_eventgen/lib/plugins/output/syslogout.py index 226f3bd9..31267aa9 100644 --- a/splunk_eventgen/lib/plugins/output/syslogout.py +++ b/splunk_eventgen/lib/plugins/output/syslogout.py @@ -1,9 +1,7 @@ -from __future__ import division - import logging import logging.handlers -from outputplugin import OutputPlugin +from splunk_eventgen.lib.outputplugin import OutputPlugin # Dict of flags to gate adding the syslogHandler only once to the given singleton logger loggerInitialized = {} diff --git a/splunk_eventgen/lib/plugins/output/tcpout.py b/splunk_eventgen/lib/plugins/output/tcpout.py index f2254a5e..5843072d 100644 --- a/splunk_eventgen/lib/plugins/output/tcpout.py +++ b/splunk_eventgen/lib/plugins/output/tcpout.py @@ -1,7 +1,5 @@ -from __future__ import division - -from outputplugin import OutputPlugin -from logging_config import logger +from splunk_eventgen.lib.outputplugin import OutputPlugin +from splunk_eventgen.lib.logging_config import logger class TcpOutputPlugin(OutputPlugin): diff --git a/splunk_eventgen/lib/plugins/output/udpout.py b/splunk_eventgen/lib/plugins/output/udpout.py index 
720de0a7..02699acd 100644 --- a/splunk_eventgen/lib/plugins/output/udpout.py +++ b/splunk_eventgen/lib/plugins/output/udpout.py @@ -1,7 +1,5 @@ -from __future__ import division - -from outputplugin import OutputPlugin -from logging_config import logger +from splunk_eventgen.lib.outputplugin import OutputPlugin +from splunk_eventgen.lib.logging_config import logger class UdpOutputPlugin(OutputPlugin): diff --git a/splunk_eventgen/lib/plugins/rater/__init__.py b/splunk_eventgen/lib/plugins/rater/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/splunk_eventgen/lib/plugins/rater/config.py b/splunk_eventgen/lib/plugins/rater/config.py index 4d252fcf..f0b9131b 100644 --- a/splunk_eventgen/lib/plugins/rater/config.py +++ b/splunk_eventgen/lib/plugins/rater/config.py @@ -1,8 +1,6 @@ -from __future__ import division - import datetime import random -from logging_config import logger +from splunk_eventgen.lib.logging_config import logger class ConfigRater(object): diff --git a/splunk_eventgen/lib/plugins/rater/perdayvolume.py b/splunk_eventgen/lib/plugins/rater/perdayvolume.py index 37abfd02..81781087 100644 --- a/splunk_eventgen/lib/plugins/rater/perdayvolume.py +++ b/splunk_eventgen/lib/plugins/rater/perdayvolume.py @@ -1,9 +1,7 @@ -from __future__ import division - import datetime import random -from config import ConfigRater -from logging_config import logger +from splunk_eventgen.lib.plugins.rater.config import ConfigRater +from splunk_eventgen.lib.logging_config import logger class PerDayVolume(ConfigRater): diff --git a/splunk_eventgen/lib/requests_futures/__init__.py b/splunk_eventgen/lib/requests_futures/__init__.py deleted file mode 100755 index 9ac9cd31..00000000 --- a/splunk_eventgen/lib/requests_futures/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- coding: utf-8 -*- - -# Requests Futures - -""" -async requests HTTP library -~~~~~~~~~~~~~~~~~~~~~ - - -""" - -import logging - -__title__ = 'requests-futures' -__version__ = '0.9.7' 
-__build__ = 0x000000 -__author__ = 'Ross McFarland' -__license__ = 'Apache 2.0' -__copyright__ = 'Copyright 2013 Ross McFarland' - -# Set default logging handler to avoid "No handler found" warnings. -try: # Python 2.7+ - from logging import NullHandler -except ImportError: - class NullHandler(logging.Handler): - def emit(self, record): - pass - -logging.getLogger(__name__).addHandler(NullHandler()) diff --git a/splunk_eventgen/lib/requests_futures/sessions.py b/splunk_eventgen/lib/requests_futures/sessions.py deleted file mode 100755 index 7fba4226..00000000 --- a/splunk_eventgen/lib/requests_futures/sessions.py +++ /dev/null @@ -1,99 +0,0 @@ -# -*- coding: utf-8 -*- -""" -requests_futures -~~~~~~~~~~~~~~~~ - -This module provides a small add-on for the requests http library. It makes use -of python 3.3's concurrent.futures or the futures backport for previous -releases of python. - - from requests_futures import FuturesSession - - session = FuturesSession() - # request is run in the background - future = session.get('http://httpbin.org/get') - # ... do other stuff ... - # wait for the request to complete, if it hasn't already - response = future.result() - print('response status: {0}'.format(response.status_code)) - print(response.content) - -""" -from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor -from functools import partial -from pickle import dumps, PickleError - -from requests import Session -from requests.adapters import DEFAULT_POOLSIZE, HTTPAdapter - - -def wrap(self, sup, background_callback, *args_, **kwargs_): - """ A global top-level is required for ProcessPoolExecutor """ - resp = sup(*args_, **kwargs_) - return background_callback(self, resp) or resp - - -PICKLE_ERROR = ('Cannot pickle function. 
Refer to documentation: https://' - 'github.com/ross/requests-futures/#using-processpoolexecutor') - - -class FuturesSession(Session): - - def __init__(self, executor=None, max_workers=2, session=None, *args, - **kwargs): - """Creates a FuturesSession - - Notes - ~~~~~ - * `ProcessPoolExecutor` may be used with Python > 3.4; - see README for more information. - - * If you provide both `executor` and `max_workers`, the latter is - ignored and provided executor is used as is. - """ - super(FuturesSession, self).__init__(*args, **kwargs) - if executor is None: - executor = ThreadPoolExecutor(max_workers=max_workers) - # set connection pool size equal to max_workers if needed - if max_workers > DEFAULT_POOLSIZE: - adapter_kwargs = dict(pool_connections=max_workers, - pool_maxsize=max_workers) - self.mount('https://', HTTPAdapter(**adapter_kwargs)) - self.mount('http://', HTTPAdapter(**adapter_kwargs)) - - self.executor = executor - self.session = session - - def request(self, *args, **kwargs): - """Maintains the existing api for Session.request. - - Used by all of the higher level methods, e.g. Session.get. - - The background_callback param allows you to do some processing on the - response in the background, e.g. call resp.json() so that json parsing - happens in the background thread. 
- """ - if self.session: - func = self.session.request - else: - # avoid calling super to not break pickled method - func = partial(Session.request, self) - - background_callback = kwargs.pop('background_callback', None) - if background_callback: - func = partial(wrap, self, func, background_callback) - - if isinstance(self.executor, ProcessPoolExecutor): - # verify function can be pickled - try: - dumps(func) - except (TypeError, PickleError): - raise RuntimeError(PICKLE_ERROR) - - return self.executor.submit(func, *args, **kwargs) - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - self.executor.shutdown() diff --git a/splunk_eventgen/lib/timeparser.py b/splunk_eventgen/lib/timeparser.py index 2298a6ee..e5f32b8d 100644 --- a/splunk_eventgen/lib/timeparser.py +++ b/splunk_eventgen/lib/timeparser.py @@ -1,4 +1,4 @@ -from logging_config import logger +from splunk_eventgen.lib.logging_config import logger import datetime import math import os @@ -46,8 +46,8 @@ def timeParser(ts='now', timezone=datetime.timedelta(days=1), now=None, utcnow=N unitsre = "(seconds|second|secs|sec|minutes|minute|min|hours|hour|hrs|hr|days|day|weeks|week|w[0-6]|" + \ "months|month|mon|quarters|quarter|qtrs|qtr|years|year|yrs|yr|s|h|m|d|w|y|w|q)" - reltimere = "(?i)(?P[+-]*)(?P\d{1,})(?P" + unitsre + "{1})(([\@](?P" + \ - unitsre + "{1})((?P[+-])(?P\d+)(?P" + unitsre + \ + reltimere = r"(?i)(?P[+-]*)(?P\d{1,})(?P" + unitsre + r"{1})(([\@](?P" + \ + unitsre + r"{1})((?P[+-])(?P\d+)(?P" + unitsre + \ "{1}))*)*)" results = re.match(reltimere, ts) diff --git a/splunk_eventgen/logger/__init__.py b/splunk_eventgen/logger/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/splunk_eventgen/logger/logger_config.py b/splunk_eventgen/logger/logger_config.py deleted file mode 100644 index 8c1850bc..00000000 --- a/splunk_eventgen/logger/logger_config.py +++ /dev/null @@ -1,17 +0,0 @@ -controller_logger_config = { - 'version': 1, - 'formatters': 
{ - 'detailed': { - 'class': 'logging.Formatter', 'format': - '%(asctime)s %(name)-15s %(levelname)-8s %(processName)-10s %(message)s'}}, - 'handlers': { - 'console': { - 'class': 'logging.StreamHandler', - 'level': 'INFO', - 'formatter': 'detailed', }, - 'main': { - 'class': 'logging.FileHandler', - 'filename': 'eventgen-controller-main.log', - 'mode': 'w', - 'formatter': 'detailed', }}, - 'root': {'level': 'DEBUG', 'handlers': ['console', 'main']}, } diff --git a/splunk_eventgen/logger/requests_futures/__init__.py b/splunk_eventgen/logger/requests_futures/__init__.py deleted file mode 100755 index ac0c4f3e..00000000 --- a/splunk_eventgen/logger/requests_futures/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# -*- coding: utf-8 -*- - -# Requests Futures -""" -async requests HTTP library -~~~~~~~~~~~~~~~~~~~~~ - - -""" - -import logging - -__title__ = 'requests-futures' -__version__ = '0.9.7' -__build__ = 0x000000 -__author__ = 'Ross McFarland' -__license__ = 'Apache 2.0' -__copyright__ = 'Copyright 2013 Ross McFarland' - -# Set default logging handler to avoid "No handler found" warnings. -try: # Python 2.7+ - from logging import NullHandler -except ImportError: - - class NullHandler(logging.Handler): - def emit(self, record): - pass - - -logging.getLogger(__name__).addHandler(NullHandler()) diff --git a/splunk_eventgen/logger/requests_futures/sessions.py b/splunk_eventgen/logger/requests_futures/sessions.py deleted file mode 100755 index 643f4e1d..00000000 --- a/splunk_eventgen/logger/requests_futures/sessions.py +++ /dev/null @@ -1,96 +0,0 @@ -# -*- coding: utf-8 -*- -""" -requests_futures -~~~~~~~~~~~~~~~~ - -This module provides a small add-on for the requests http library. It makes use -of python 3.3's concurrent.futures or the futures backport for previous -releases of python. - - from requests_futures import FuturesSession - - session = FuturesSession() - # request is run in the background - future = session.get('http://httpbin.org/get') - # ... 
do other stuff ... - # wait for the request to complete, if it hasn't already - response = future.result() - print('response status: {0}'.format(response.status_code)) - print(response.content) - -""" -from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor -from functools import partial -from pickle import PickleError, dumps - -from requests import Session -from requests.adapters import DEFAULT_POOLSIZE, HTTPAdapter - - -def wrap(self, sup, background_callback, *args_, **kwargs_): - """ A global top-level is required for ProcessPoolExecutor """ - resp = sup(*args_, **kwargs_) - return background_callback(self, resp) or resp - - -PICKLE_ERROR = ('Cannot pickle function. Refer to documentation: https://' - 'github.com/ross/requests-futures/#using-processpoolexecutor') - - -class FuturesSession(Session): - def __init__(self, executor=None, max_workers=2, session=None, *args, **kwargs): - """Creates a FuturesSession - - Notes - ~~~~~ - * `ProcessPoolExecutor` may be used with Python > 3.4; - see README for more information. - - * If you provide both `executor` and `max_workers`, the latter is - ignored and provided executor is used as is. - """ - super(FuturesSession, self).__init__(*args, **kwargs) - if executor is None: - executor = ThreadPoolExecutor(max_workers=max_workers) - # set connection pool size equal to max_workers if needed - if max_workers > DEFAULT_POOLSIZE: - adapter_kwargs = dict(pool_connections=max_workers, pool_maxsize=max_workers) - self.mount('https://', HTTPAdapter(**adapter_kwargs)) - self.mount('http://', HTTPAdapter(**adapter_kwargs)) - - self.executor = executor - self.session = session - - def request(self, *args, **kwargs): - """Maintains the existing api for Session.request. - - Used by all of the higher level methods, e.g. Session.get. - - The background_callback param allows you to do some processing on the - response in the background, e.g. call resp.json() so that json parsing - happens in the background thread. 
- """ - if self.session: - func = self.session.request - else: - # avoid calling super to not break pickled method - func = partial(Session.request, self) - - background_callback = kwargs.pop('background_callback', None) - if background_callback: - func = partial(wrap, self, func, background_callback) - - if isinstance(self.executor, ProcessPoolExecutor): - # verify function can be pickled - try: - dumps(func) - except (TypeError, PickleError): - raise RuntimeError(PICKLE_ERROR) - - return self.executor.submit(func, *args, **kwargs) - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - self.executor.shutdown() diff --git a/splunk_eventgen/eventgen_api_server/__init__.py b/splunk_eventgen/logs/__init__ similarity index 100% rename from splunk_eventgen/eventgen_api_server/__init__.py rename to splunk_eventgen/logs/__init__ diff --git a/splunk_eventgen/logs/__init__.py b/splunk_eventgen/logs/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/splunk_eventgen/splunk_app/README/eventgen.conf.spec b/splunk_eventgen/splunk_app/README/eventgen.conf.spec index 422cbd00..c713f2f5 100644 --- a/splunk_eventgen/splunk_app/README/eventgen.conf.spec +++ b/splunk_eventgen/splunk_app/README/eventgen.conf.spec @@ -1,4 +1,4 @@ -# Copyright (C) 2005-2015 Splunk Inc. All Rights Reserved. +# Copyright (C) 2005-2019 Splunk Inc. All Rights Reserved. # # This file contains all possible options for an eventgen.conf file. Use this file to configure # Splunk's event generation properties. 
diff --git a/splunk_eventgen/splunk_app/bin/modinput_eventgen.py b/splunk_eventgen/splunk_app/bin/modinput_eventgen.py index d1f283a6..42bbc13f 100644 --- a/splunk_eventgen/splunk_app/bin/modinput_eventgen.py +++ b/splunk_eventgen/splunk_app/bin/modinput_eventgen.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # encoding: utf-8 import argparse import logging @@ -30,17 +30,23 @@ class SimpleNamespace(dict): class Eventgen(ModularInput): scheme_args = { - 'title': "SA-Eventgen", 'description': "This modular input generates data for Splunk.", - 'use_external_validation': "true", 'streaming_mode': "xml", 'use_single_instance': "False"} + 'title': "SA-Eventgen", + 'description': "This modular input generates data for Splunk.", + 'use_external_validation': "true", + 'streaming_mode': "xml", + 'use_single_instance': "False" + } def __init__(self): logger.debug("Setting up SA-Eventgen Modular Input") self.output = XMLOutputManager() self.args = [ - VerbosityField("verbosity", "Verbosity", + VerbosityField("verbosity", + "Verbosity", "Logging Level (DEBUG(10), INFO(20), WARN(30), ERROR(40), CRITICAL(50))", - required_on_create=True, required_on_edit=True)] + required_on_create=True, + required_on_edit=True)] ModularInput.__init__(self, self.scheme_args, self.args) def create_args(self): diff --git a/splunk_eventgen/splunk_app/default/app.conf b/splunk_eventgen/splunk_app/default/app.conf index a09ad811..880a9a25 100644 --- a/splunk_eventgen/splunk_app/default/app.conf +++ b/splunk_eventgen/splunk_app/default/app.conf @@ -1,4 +1,4 @@ -# Copyright (C) 2005-2018 Splunk Inc. All Rights Reserved. +# Copyright (C) 2005-2019 Splunk Inc. All Rights Reserved. # DO NOT EDIT THIS FILE! # Please make all changes to files in $SPLUNK_HOME/etc/apps/SA-Eventgen/local. 
# To make changes, copy the section/stanza you want to change from $SPLUNK_HOME/etc/apps/SA-Eventgen/default diff --git a/splunk_eventgen/splunk_app/default/distsearch.conf b/splunk_eventgen/splunk_app/default/distsearch.conf index 58b29e77..a05ad58f 100644 --- a/splunk_eventgen/splunk_app/default/distsearch.conf +++ b/splunk_eventgen/splunk_app/default/distsearch.conf @@ -1,4 +1,4 @@ -# Copyright (C) 2005-2015 Splunk Inc. All Rights Reserved. +# Copyright (C) 2005-2019 Splunk Inc. All Rights Reserved. # DO NOT EDIT THIS FILE! # Please make all changes to files in $SPLUNK_HOME/etc/apps/SA-Eventgen/local. # To make changes, copy the section/stanza you want to change from $SPLUNK_HOME/etc/apps/SA-Eventgen/default diff --git a/splunk_eventgen/splunk_app/default/distsearch.conf.windows b/splunk_eventgen/splunk_app/default/distsearch.conf.windows index 9cb11cf2..c345033c 100644 --- a/splunk_eventgen/splunk_app/default/distsearch.conf.windows +++ b/splunk_eventgen/splunk_app/default/distsearch.conf.windows @@ -1,4 +1,4 @@ -# Copyright (C) 2005-2011 Splunk Inc. All Rights Reserved. +# Copyright (C) 2005-2019 Splunk Inc. All Rights Reserved. # DO NOT EDIT THIS FILE! # Please make all changes to files in $SPLUNK_HOME/etc/apps/SA-Eventgen/local. # To make changes, copy the section/stanza you want to change from $SPLUNK_HOME/etc/apps/SA-Eventgen/default @@ -9,4 +9,4 @@ [replicationBlacklist] ## Prevent event generator app from being replicated via distsearch -noeventgen = apps\SA-Eventgen\... \ No newline at end of file +noeventgen = apps\SA-Eventgen\... 
diff --git a/splunk_eventgen/splunk_app/default/inputs.conf b/splunk_eventgen/splunk_app/default/inputs.conf index f93d28c8..7316ffd4 100644 --- a/splunk_eventgen/splunk_app/default/inputs.conf +++ b/splunk_eventgen/splunk_app/default/inputs.conf @@ -2,4 +2,5 @@ verbosity = 40 [modinput_eventgen://default] +python.version = python3 disabled = true diff --git a/splunk_eventgen/splunk_app/lib/mod_input/__init__.py b/splunk_eventgen/splunk_app/lib/mod_input/__init__.py index 44c4bcc3..7058f5ca 100644 --- a/splunk_eventgen/splunk_app/lib/mod_input/__init__.py +++ b/splunk_eventgen/splunk_app/lib/mod_input/__init__.py @@ -1,6 +1,3 @@ -""" -Copyright (C) 2005 - 2018 Splunk Inc. All Rights Reserved. -""" import argparse import getpass import hashlib @@ -131,7 +128,10 @@ class ModularInput(object): IntervalField("interval", "Interval", "The interval the script will be run on"), Field("name", "Stanza name", "The name of the stanza for this modular input"), Field("source", "Source", "The source for events created by this modular input"), - Field("sourcetype", "Stanza name", "The name of the stanza for this modular input")] + Field("sourcetype", "Stanza name", "The name of the stanza for this modular input"), + # added for Splunk 8.0.0 support + Field("python.version", "Python version", "Python version to run this modular input") + ] checkpoint_dir = None @@ -194,7 +194,7 @@ def _create_event(self, doc, params, stanza, unbroken=False, close=True): valid_elements = ['host', 'index', 'source', 'sourcetype', 'time', 'data'] # Append the valid child elements. Invalid elements will be dropped. 
- for element in filter(lambda x: x in valid_elements, params.keys()): + for element in [x for x in list(params.keys()) if x in valid_elements]: event.appendChild(self._create_formatter_textnode(doc, element, params[element])) if close: @@ -438,7 +438,7 @@ def validate_parameters(self, parameters): all_args[a.name] = a # Convert and check the parameters - for name, value in parameters.items(): + for name, value in list(parameters.items()): # If the argument was found, then validate and convert it if name in all_args: @@ -676,7 +676,7 @@ def set_checkpoint_data(self, filename, data, checkpoint_dir=None): success = False try: - with open(os.path.join(checkpoint_dir, filename), 'w') as fp: + with open(os.path.join(checkpoint_dir, filename), 'wb') as fp: json.dump(data, fp) success = True except IOError: @@ -717,7 +717,7 @@ def get_checkpoint_data(self, filename, checkpoint_dir=None, raise_known_excepti try: if os.path.isfile(checkpoint_path): - with open(checkpoint_path, 'r') as fp: + with open(checkpoint_path, 'rb') as fp: data = json.load(fp) except (IOError, ValueError) as e: logger.exception( @@ -755,7 +755,7 @@ def do_run(self, in_stream=sys.stdin, log_exception_and_continue=False): # Validate all stanza parameters. stanzas = [] - for stanza_name, unclean_stanza in input_config.configuration.items(): + for stanza_name, unclean_stanza in list(input_config.configuration.items()): try: stanzas.append(self.validate_parameters(unclean_stanza)) except FieldValidationException as e: diff --git a/splunk_eventgen/splunk_app/lib/mod_input/fields.py b/splunk_eventgen/splunk_app/lib/mod_input/fields.py index 681ae6d5..79064688 100644 --- a/splunk_eventgen/splunk_app/lib/mod_input/fields.py +++ b/splunk_eventgen/splunk_app/lib/mod_input/fields.py @@ -1,6 +1,3 @@ -''' -Copyright (C) 2005 - 2018 Splunk Inc. All Rights Reserved. -''' import json import re @@ -148,7 +145,7 @@ class DurationField(Field): The string is converted to an integer indicating the number of seconds. 
""" - DURATION_RE = re.compile("(?P[0-9]+)\s*(?P[a-z]*)", re.IGNORECASE) + DURATION_RE = re.compile(r"(?P[0-9]+)\s*(?P[a-z]*)", re.IGNORECASE) MINUTE = 60 HOUR = 3600 @@ -265,7 +262,7 @@ class IntervalField(Field): # cron field. cron_rx = re.compile( - ''' + r''' ( \d{1,2} # A digit. |\d{1,2}-\d{1,2} # A range. @@ -396,7 +393,7 @@ class SeverityField(Field): # same value as "CRITICAL". SEVERITIES = {'DEBUG': 10, 'INFO': 20, 'WARN': 30, 'ERROR': 40, 'CRITICAL': 50} - SEVERITIES_BY_INT = {v: k for k, v in SEVERITIES.iteritems()} + SEVERITIES_BY_INT = {v: k for k, v in SEVERITIES.items()} def to_python(self, value): diff --git a/tests/large/provision/docker-compose.yml b/tests/large/provision/docker-compose.yml index 1e772f55..f8165a8e 100644 --- a/tests/large/provision/docker-compose.yml +++ b/tests/large/provision/docker-compose.yml @@ -17,11 +17,3 @@ services: - "/var/run/docker.sock:/var/run/docker.sock" # to make terminal colorful tty: true - # expose a TCP socket for accessing docker API - socat: - image: alpine/socat:1.0.2 - command: TCP4-LISTEN:2375,fork,reuseaddr UNIX-CONNECT:/var/run/docker.sock - ports: - - 2375:2375 - volumes: - - "/var/run/docker.sock:/var/run/docker.sock" diff --git a/tests/large/test_eventgen_orchestration.py b/tests/large/test_eventgen_orchestration.py index f5d8e0da..ba67e05b 100644 --- a/tests/large/test_eventgen_orchestration.py +++ b/tests/large/test_eventgen_orchestration.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # encoding: utf-8 import json @@ -51,25 +51,25 @@ class TestEventgenOrchestration(object): def setup_class(cls): # Build the image from scratch cls.client = APIClient(base_url="unix://var/run/docker.sock") - response = cls.client.build(path=REPO_DIR, dockerfile=os.path.join("dockerfiles", "Dockerfile"), tag=IMAGE_NAME, rm=True, nocache=True, pull=True, stream=False) + response = cls.client.build(path=REPO_DIR, dockerfile=os.path.join("dockerfiles", "Dockerfile"), tag=IMAGE_NAME, rm=True, 
nocache=True, pull=True) for line in response: - print line, + print(line, end=' ') # Create a network for both the controller and server to run in cls.client.create_network(NETWORK_NAME, driver="bridge", attachable=True) networking_config = cls.client.create_networking_config({NETWORK_NAME: cls.client.create_endpoint_config()}) # Start the controller - print 'creating controller' + print('creating controller') host_config = cls.client.create_host_config(auto_remove=True, publish_all_ports=True) container = cls.client.create_container(image=IMAGE_NAME, command="controller", host_config=host_config, networking_config=networking_config) cls.client.start(container["Id"]) TestEventgenOrchestration.controller_id = container["Id"] - print container["Id"] + print(container["Id"]) cls.controller_container = cls.client.inspect_container(container["Id"]) cls.controller_eventgen_webport = cls.controller_container["NetworkSettings"]["Ports"]["9500/tcp"][0][ "HostPort"] # Start the server - print 'creating server' + print('creating server') redis_host = container["Id"][:12] container = cls.client.create_container( image=IMAGE_NAME, command="server", environment=["REDIS_HOST={}".format(redis_host)], @@ -77,19 +77,19 @@ def setup_class(cls): networking_config=networking_config) cls.client.start(container["Id"]) TestEventgenOrchestration.server_id = container["Id"] - print container["Id"] + print(container["Id"]) cls.server_container = cls.client.inspect_container(container["Id"]) cls.server_eventgen_webport = cls.server_container["NetworkSettings"]["Ports"]["9500/tcp"][0]["HostPort"] # Wait for the controller to be available - print "Waiting for Eventgen Controller to become available." + print("Waiting for Eventgen Controller to become available.") wait_for_response("http://127.0.0.1:{}".format(cls.controller_eventgen_webport)) - print "Eventgen Controller has become available." 
+ print("Eventgen Controller has become available.") # Wait for the server to be available - print "Waiting for Eventgen Server to become available." + print("Waiting for Eventgen Server to become available.") wait_for_response("http://127.0.0.1:{}".format(cls.server_eventgen_webport)) - print "Eventgen Server has become available." + print("Eventgen Server has become available.") time.sleep(30) cls.test_json = { @@ -116,14 +116,14 @@ def teardown_class(cls): def test_controller_root(self): r = requests.get("http://127.0.0.1:{}/".format(self.controller_eventgen_webport)) assert r.status_code == 200 - assert "running_eventgen_controller" in r.content + assert b"running_eventgen_controller" in r.content def test_controller_index(self): r = requests.get("http://127.0.0.1:{}/index".format(self.controller_eventgen_webport)) assert r.status_code == 200 - assert "Eventgen Controller" in r.content - assert "Host: " in r.content - assert "You are running Eventgen Controller" in r.content + assert b"Eventgen Controller" in r.content + assert b"Host: " in r.content + assert b"You are running Eventgen Controller" in r.content def test_controller_status(self): max_retry = 5 @@ -140,34 +140,34 @@ def test_controller_status(self): def test_controller_conf(self): r = requests.post("http://127.0.0.1:{}/conf".format(self.controller_eventgen_webport), json=self.test_json) assert r.status_code == 200 - assert "windbag" in r.content + assert b"windbag" in r.content def test_controller_start(self): r = requests.post("http://127.0.0.1:{}/start".format(self.controller_eventgen_webport)) assert r.status_code == 200 - assert "Eventgen has successfully started" in r.content + assert b"Eventgen has successfully started" in r.content def test_controller_start_with_target(self): r = requests.post("http://127.0.0.1:{}/start/{}".format(self.controller_eventgen_webport, TestEventgenOrchestration.server_id[:12])) assert r.status_code == 200 - assert "Eventgen already started" in r.content + assert 
b"Eventgen already started" in r.content def test_controller_restart(self): r = requests.post("http://127.0.0.1:{}/restart".format(self.controller_eventgen_webport)) assert r.status_code == 200 - assert "Eventgen is restarting" in r.content + assert b"Eventgen is restarting" in r.content def test_controller_restart_with_target(self): r = requests.post("http://127.0.0.1:{}/restart/{}".format(self.controller_eventgen_webport, TestEventgenOrchestration.server_id[:12])) assert r.status_code == 200 - assert "Eventgen is restarting" in r.content + assert b"Eventgen is restarting" in r.content def test_controller_bundle_invalid_request(self): r = requests.post("http://127.0.0.1:{}/bundle".format(self.controller_eventgen_webport)) assert r.status_code == 500 - assert "Internal Error Occurred" in r.content + assert b"Internal Error Occurred" in r.content def test_controller_bundle_with_url(self): r = requests.post("http://127.0.0.1:{}/bundle".format(self.controller_eventgen_webport), json={ @@ -196,7 +196,7 @@ def test_controller_get_volume(self): def test_controller_set_volume_invalid_request(self): r = requests.post("http://127.0.0.1:{}/volume".format(self.controller_eventgen_webport)) assert r.status_code == 500 - assert "Internal Error Occurred" in r.content + assert b"Internal Error Occurred" in r.content def test_controller_set_volume_with_volume(self): r = requests.post("http://127.0.0.1:{}/volume".format(self.controller_eventgen_webport), json={ @@ -217,13 +217,13 @@ def test_controller_stop(self): r = requests.post("http://127.0.0.1:{}/stop".format(self.controller_eventgen_webport)) assert r.status_code == 200 assert r.status_code == 200 - assert "Eventgen is stopping" in r.content + assert b"Eventgen is stopping" in r.content def test_controller_stop_with_target(self): r = requests.post("http://127.0.0.1:{}/stop/{}".format(self.controller_eventgen_webport, TestEventgenOrchestration.server_id[:12])) assert r.status_code == 200 - assert "Eventgen is stopping" in 
r.content + assert b"Eventgen is stopping" in r.content # Server tests # @@ -234,19 +234,19 @@ def test_server_reset(self): def test_server_root(self): r = requests.get("http://127.0.0.1:{}".format(self.server_eventgen_webport)) assert r.status_code == 200 - assert "running_eventgen_server" in r.content + assert b"running_eventgen_server" in r.content def test_server_index(self): r = requests.get("http://127.0.0.1:{}/index".format(self.server_eventgen_webport)) assert r.status_code == 200 - assert "Host: " in r.content - assert "Eventgen Status" in r.content - assert "Eventgen Config file exists" in r.content - assert "Eventgen Config file path" in r.content - assert "Total volume:" in r.content - assert "Worker Queue Status" in r.content - assert "Sample Queue Status" in r.content - assert "Output Queue Status" in r.content + assert b"Host: " in r.content + assert b"Eventgen Status" in r.content + assert b"Eventgen Config file exists" in r.content + assert b"Eventgen Config file path" in r.content + assert b"Total volume:" in r.content + assert b"Worker Queue Status" in r.content + assert b"Sample Queue Status" in r.content + assert b"Output Queue Status" in r.content def test_server_status(self): r = requests.get("http://127.0.0.1:{}/status".format(self.server_eventgen_webport)) @@ -272,22 +272,22 @@ def test_server_get_and_set_conf(self): def test_server_start(self): r = requests.post("http://127.0.0.1:{}/start".format(self.server_eventgen_webport), timeout=5) assert r.status_code == 200 - assert "Eventgen has successfully started" in r.content + assert b"Eventgen has successfully started" in r.content def test_server_restart(self): r = requests.post("http://127.0.0.1:{}/restart".format(self.server_eventgen_webport)) assert r.status_code == 200 - assert "Eventgen has successfully restarted" in r.content + assert b"Eventgen has successfully restarted" in r.content def test_server_stop(self): r = 
requests.post("http://127.0.0.1:{}/stop".format(self.server_eventgen_webport)) assert r.status_code == 200 - assert "Eventgen is stopped" in r.content + assert b"Eventgen is stopped" in r.content def test_server_bundle(self): r = requests.post("http://127.0.0.1:{}/bundle".format(self.server_eventgen_webport)) assert r.status_code == 500 - assert "Internal Error Occurred" in r.content + assert b"Internal Error Occurred" in r.content def test_server_get_and_set_volume(self): # Must initialize a stanza with the perDayVolume setting before hitting the /volume endpoint diff --git a/tests/large/test_jinja_template.py b/tests/large/test_jinja_template.py index bb3468f3..d9aa6741 100644 --- a/tests/large/test_jinja_template.py +++ b/tests/large/test_jinja_template.py @@ -20,7 +20,7 @@ def test_jinja_template_simple(eventgen_test_helper): event_datetime = datetime.datetime.strptime(result.group(1), ts_format) delta_seconds = (event_datetime - current_datetime).total_seconds() # assert the event time is after (now - earliest) time - assert delta_seconds >= -3 and delta_seconds < 3, 'fail to check event ```{}```'.format(event) + assert -3 <= delta_seconds < 3, 'fail to check event ```{}```'.format(event) assert loop == int(result.group(2)), 'fail to check event ```{}```'.format(event) loop += 1 @@ -40,7 +40,7 @@ def test_jinja_template_dir_conf(eventgen_test_helper): event_datetime = datetime.datetime.strptime(result.group(1), ts_format) delta_seconds = (event_datetime - current_datetime).total_seconds() # assert the event time is after (now - earliest) time - assert delta_seconds >= -3 and delta_seconds < 3 + assert -3 <= delta_seconds < 3 assert loop == int(result.group(2)) loop += 1 diff --git a/tests/large/test_mode_replay.py b/tests/large/test_mode_replay.py index 99821d99..b0576852 100644 --- a/tests/large/test_mode_replay.py +++ b/tests/large/test_mode_replay.py @@ -69,7 +69,7 @@ def test_mode_replay_timemultiple(eventgen_test_helper): event_datetime = 
datetime.strptime(result.group(), "%Y-%m-%d %H:%M:%S") delter_seconds = (event_datetime - current_datetime).total_seconds() # assert the event time is after (now - earliest) time - assert delter_seconds < 12 + assert delter_seconds < 14 def test_mode_replay_csv(eventgen_test_helper): diff --git a/tests/large/test_mode_sample.py b/tests/large/test_mode_sample.py index 27ae910c..daea33ff 100644 --- a/tests/large/test_mode_sample.py +++ b/tests/large/test_mode_sample.py @@ -91,7 +91,7 @@ def test_mode_sample_latest(eventgen_test_helper): event_datetime = datetime.strptime(result.group(), "%Y-%m-%d %H:%M:%S") delter_seconds = (event_datetime - current_datetime).total_seconds() # assert the event time is after (now - earliest) time - assert delter_seconds < 16 + assert delter_seconds < 17 def test_mode_sample_count(eventgen_test_helper): diff --git a/tests/large/test_modular_input.py b/tests/large/test_modular_input.py index 568bf951..a5cdf2c0 100644 --- a/tests/large/test_modular_input.py +++ b/tests/large/test_modular_input.py @@ -11,11 +11,7 @@ def test_modular_input(mocker, capsys): # eventgen base directory base_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) - # insert modular input directory to the sys path - mod_input_path = os.path.join(base_dir, 'splunk_eventgen', 'splunk_app', 'bin') - sys.path.insert(0, mod_input_path) - - from modinput_eventgen import Eventgen + from splunk_eventgen.splunk_app.bin.modinput_eventgen import Eventgen # input xml stream used to start modular input input_stream_path = os.path.join(base_dir, 'tests', 'large', 'splunk', 'input.xml') diff --git a/tests/large/test_output_modinput.py b/tests/large/test_output_modinput.py index 609cdff3..51f63e34 100644 --- a/tests/large/test_output_modinput.py +++ b/tests/large/test_output_modinput.py @@ -18,7 +18,7 @@ def test_output_plugin_modinput(): """ conf_file = os.path.join(file_dir, 'conf', 'eventgen_output_modinput.conf') child = 
subprocess.Popen(['splunk_eventgen', 'generate', conf_file], stdout=subprocess.PIPE) - all_events = child.communicate()[0] + all_events = child.communicate()[0].decode('UTF-8') parts = all_events.split('') events = ['' + p for p in parts if p.strip() != ''] @@ -46,7 +46,7 @@ def test_output_plugin_modinput(): ts_str = datetime.datetime.fromtimestamp(ts_int).strftime('%Y-%m-%d %H:%M:%S') raw = d.text.strip() assert raw.startswith(ts_str) - p = re.compile('WINDBAG Event (\d+) of 12 randint (\d+)') + p = re.compile(r'WINDBAG Event (\d+) of 12 randint (\d+)') m = p.search(raw) assert m is not None assert len(m.groups()) == 2 diff --git a/tests/large/test_output_plugin.py b/tests/large/test_output_plugin.py index c0e43862..9bf855d6 100644 --- a/tests/large/test_output_plugin.py +++ b/tests/large/test_output_plugin.py @@ -1,4 +1,4 @@ -from utils.splunk_search_util import get_session_key, preprocess_search, run_search, get_search_response +from tests.large.utils.splunk_search_util import get_session_key, preprocess_search, run_search, get_search_response def test_plugin_devnull(eventgen_test_helper): diff --git a/tests/large/test_token_replacement.py b/tests/large/test_token_replacement.py index 157d0a39..f66e452f 100644 --- a/tests/large/test_token_replacement.py +++ b/tests/large/test_token_replacement.py @@ -12,13 +12,13 @@ def test_token_replacement(eventgen_test_helper): # assert the events size is 10 since end = 1 assert len(events) == 10 - with open(os.path.join(base_dir, 'sample', 'id.csv'), 'rb') as f: + with open(os.path.join(base_dir, 'sample', 'id.csv'), 'rt') as f: id_content = f.read() - with open(os.path.join(base_dir, 'sample', 'ip.csv'), 'rb') as f: + with open(os.path.join(base_dir, 'sample', 'ip.csv'), 'rt') as f: ip_content = f.read() - with open(os.path.join(base_dir, 'sample', 'cp.csv'), 'rb') as f: + with open(os.path.join(base_dir, 'sample', 'cp.csv'), 'rt') as f: cp_content = f.read() - with open(os.path.join(base_dir, 'sample', 'city.csv'), 'rb') 
as f: + with open(os.path.join(base_dir, 'sample', 'city.csv'), 'rt') as f: reader = csv.reader(f) country = [] city = [] diff --git a/tests/large/utils/eventgen_test_helper.py b/tests/large/utils/eventgen_test_helper.py index cf6569e8..3eaa7575 100644 --- a/tests/large/utils/eventgen_test_helper.py +++ b/tests/large/utils/eventgen_test_helper.py @@ -30,7 +30,7 @@ def __init__(self, conf, timeout=None, mode=None, env=None): cmd.append('--multiprocess') env_var = os.environ.copy() if env is not None: - for k, v in env.iteritems(): + for k, v in env.items(): env_var[k] = v self.process = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env_var) if timeout: @@ -72,6 +72,10 @@ def get_events(self): self.breaker = self.breaker[1:] if self.breaker[-1] == '$': self.breaker = self.breaker[:-1] + + if isinstance(output, bytes): + output = output.decode("UTF-8") + results = re.split(self.breaker, output) return [x for x in results if x != ""] diff --git a/tests/large/utils/splunk_search_util.py b/tests/large/utils/splunk_search_util.py index 3252dfd2..a7a24a78 100644 --- a/tests/large/utils/splunk_search_util.py +++ b/tests/large/utils/splunk_search_util.py @@ -1,4 +1,6 @@ -import urllib +import urllib.request +import urllib.parse +import urllib.error import httplib2 from xml.dom import minidom import time @@ -14,7 +16,7 @@ def get_session_key(): BASEURL + '/services/auth/login', 'POST', headers={}, - body=urllib.urlencode({'username': USERNAME, 'password': PASSWORD}) + body=urllib.parse.urlencode({'username': USERNAME, 'password': PASSWORD}) )[1] try: session_key = minidom.parseString(server_content).getElementsByTagName('sessionKey')[0].childNodes[0].nodeValue @@ -38,7 +40,7 @@ def run_search(session_key, search_query): BASEURL + '/services/search/jobs', 'POST', headers={'Authorization': 'Splunk %s' % session_key}, - body=urllib.urlencode({'search': search_query}) + body=urllib.parse.urlencode({'search': search_query}) )[1] # return search job id try: @@ -54,7 +56,7 @@ 
def get_search_response(session_key, search_job_id): BASEURL + '/services/search/jobs/%s/results' % search_job_id, 'GET', headers={'Authorization': 'Splunk %s' % session_key}, - body=urllib.urlencode({'output_mode': 'json'}) + body=urllib.parse.urlencode({'output_mode': 'json'}) ) try: return json.loads(results[1])['results'] diff --git a/tests/medium/plugins/test_file_output.py b/tests/medium/plugins/test_file_output.py index f290f640..14c66c88 100644 --- a/tests/medium/plugins/test_file_output.py +++ b/tests/medium/plugins/test_file_output.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # encoding: utf-8 import os @@ -14,7 +14,7 @@ class TestFileOutputPlugin(object): def test_output_data_to_file(self): - configfile = "tests/sample_eventgen_conf/medium_test/eventgen.conf.fileoutput" + configfile = os.path.join(FILE_DIR, "..", "..", "sample_eventgen_conf", "medium_test", "eventgen.conf.fileoutput") testargs = ["eventgen", "generate", configfile] with patch.object(sys, 'argv', testargs): pargs = parse_args() @@ -32,3 +32,7 @@ def test_output_data_to_file(self): break assert "WINDBAG Event {} of 5".format(line_count) in output_line line_count += 1 + + # tear down + if os.path.isfile(file_output_path): + os.remove(file_output_path) diff --git a/tests/medium/plugins/test_jinja_generator.py b/tests/medium/plugins/test_jinja_generator.py index f40bcdd6..b537b901 100644 --- a/tests/medium/plugins/test_jinja_generator.py +++ b/tests/medium/plugins/test_jinja_generator.py @@ -11,7 +11,7 @@ class TestJinjaGenerator(object): def test_jinja_generator_to_file(self): - configfile = "tests/sample_eventgen_conf/jinja/eventgen.conf.jinja_basic" + configfile = os.path.join(FILE_DIR, "..", "..", "sample_eventgen_conf", "jinja", "eventgen.conf.jinja_basic") testargs = ["eventgen", "generate", configfile] file_output_path = os.path.abspath(os.path.join(FILE_DIR, '..', '..', '..', OUTPUT_FILE)) # remove the result file if it exists @@ -35,3 +35,7 @@ def 
test_jinja_generator_to_file(self): assert "I like little windbags" in output_line assert "Im at: {0} out of: 10".format(line_count) in output_line line_count += 1 + + # tear down + if os.path.isfile(file_output_path): + os.remove(file_output_path) diff --git a/tests/medium/plugins/test_syslog_output.py b/tests/medium/plugins/test_syslog_output.py index 831296c4..365d6420 100644 --- a/tests/medium/plugins/test_syslog_output.py +++ b/tests/medium/plugins/test_syslog_output.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # encoding: utf-8 import os @@ -31,7 +31,7 @@ def test_output_data_to_syslog(self): syslogoutput = SyslogOutOutputPlugin(sample) eventgen.start() - for i in xrange(1, 6): + for i in range(1, 6): appearance = False for logger_call in syslogoutput._l.info.call_args_list: if "WINDBAG Event {} of 5".format(i) in str(logger_call): diff --git a/tests/medium/plugins/test_syslog_output_with_header.py b/tests/medium/plugins/test_syslog_output_with_header.py index 4bc69cc2..915d5802 100644 --- a/tests/medium/plugins/test_syslog_output_with_header.py +++ b/tests/medium/plugins/test_syslog_output_with_header.py @@ -31,7 +31,7 @@ def test_output_data_to_syslog_with_header(self): syslogoutput = SyslogOutOutputPlugin(sample) eventgen.start() - for i in xrange(1, 6): + for i in range(1, 6): appearance = False for logger_call in syslogoutput._l.info.call_args_list: if "WINDBAG Event {} of 5".format(i) in str(logger_call): diff --git a/tests/medium/plugins/test_tcp_output.py b/tests/medium/plugins/test_tcp_output.py index e3ea1320..b79130c6 100644 --- a/tests/medium/plugins/test_tcp_output.py +++ b/tests/medium/plugins/test_tcp_output.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # encoding: utf-8 import os diff --git a/tests/medium/plugins/test_udp_output.py b/tests/medium/plugins/test_udp_output.py index a7cbde26..afe79e4d 100644 --- a/tests/medium/plugins/test_udp_output.py +++ b/tests/medium/plugins/test_udp_output.py @@ -1,4 
+1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # encoding: utf-8 import os diff --git a/tests/small/test_main.py b/tests/small/test_main.py index d0575333..111d09cf 100644 --- a/tests/small/test_main.py +++ b/tests/small/test_main.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 import os import sys diff --git a/tests/unit/test_eventgenconfig.py b/tests/unit/test_eventgenconfig.py index b0391ef6..0b883588 100644 --- a/tests/unit/test_eventgenconfig.py +++ b/tests/unit/test_eventgenconfig.py @@ -1,6 +1,6 @@ import json import os -from ConfigParser import ConfigParser +from configparser import ConfigParser import pytest diff --git a/tests/unit/test_timeparser.py b/tests/unit/test_timeparser.py index 4dc7d13d..b4860f12 100644 --- a/tests/unit/test_timeparser.py +++ b/tests/unit/test_timeparser.py @@ -59,7 +59,7 @@ def test_time_parser_time_math(plusminus, num, unit, ret, expect): Case 1: input "0s" -- the time parser should return now Case 2: input "123" -- unit is the empty string, behavior ''' - check_datetime_equal(timeparser.timeParserTimeMath(plusminus, num, unichr, ret), expect) + check_datetime_equal(timeparser.timeParserTimeMath(plusminus, num, chr, ret), expect) def mock_now(): From 188ac04e0f0e9d114d2d556e833a922ffb9613f6 Mon Sep 17 00:00:00 2001 From: Lynch Wu Date: Wed, 23 Oct 2019 14:30:36 +0800 Subject: [PATCH 28/53] Bump version to 7.0.0 --- splunk_eventgen/version.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/splunk_eventgen/version.json b/splunk_eventgen/version.json index d04847bd..342b591f 100644 --- a/splunk_eventgen/version.json +++ b/splunk_eventgen/version.json @@ -1 +1 @@ -{"version": "6.5.2"} +{"version": "7.0.0"} From e508bc2b728be1478174b776add6c56b40b3d8c1 Mon Sep 17 00:00:00 2001 From: Lynch Wu Date: Wed, 23 Oct 2019 17:50:48 +0800 Subject: [PATCH 29/53] Fix dateutil package bug --- .../lib/python_dateutil-1.4.1-py2.7.egg | Bin 230363 -> 0 bytes splunk_eventgen/lib/timeparser.py | 5 ----- 
2 files changed, 5 deletions(-) delete mode 100644 splunk_eventgen/lib/python_dateutil-1.4.1-py2.7.egg diff --git a/splunk_eventgen/lib/python_dateutil-1.4.1-py2.7.egg b/splunk_eventgen/lib/python_dateutil-1.4.1-py2.7.egg deleted file mode 100644 index 85ff6111935a0e7e8aa4bf376a1f16d8710d30c5..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 230363 zcmZ^~Q;;r7ur=7WZQHi}wQbwBx!bmF+wR_N+wR@AIp@y9eay2kuaP*3en}Hc( zLJGb0iXx^IhxkA{>|^Fh4aBq%Q4Pw|DKT!mggf z0icy`Ilc_kzUCleAj;9l9y?Z?9^0mgVIL+V-D>-^w4LDw)5|y$JejfR>l_VY=%RvH zMQx*>Nz~N}7cIt8Ia93C)3>0oK$&UnhDT1Z;1U0}WHd+w45?&w{=1C&#FO1YstKOm z9#Z|R%cr+iV!+I0Z1a7vQTjG{+o)o zSwp+bbP7Xzo-)Nt(;{L&tiSttP}LIYEc$@!1Pv?mojO6hJAeVMs#(ng-^&(T#grzL z9UIJ`)f#P*yu#&7*AIQX{r)ZD{|4B?(}hz5dvkCT3PP z&&@}BRyCEvC5rTtbQvq_B!e0{o6@PQ8j5THNRw^7H+voXT+}RTL9)o0n&n`g}6o=?7Es<$rd%S7y`lE6u z8D`*~v{9L=6j^@W6>YqMH)zr!crK2P z0xwxcdzVNi4}W5PixLN|=B*qtxFAfT<;0tll% z3ytex_~_rDr!@afzJv}WUanuIvl2|ToD8R&+6k4RRX757ZS6uSgDS}bf>##iyu8rI zbIuv3R?$56j#R3&zluDd4Ce3YZd!_+5LIV$JjQfPN(#-5ocr0^jnG{xTW5piO5XgY z($04%TQMzMIE{E;d6exgXD14|3_0gZ3W0bX*BrtGrUx#G_7LXi0&#BNwZ3?_M6QBk zYz8k7d6#dB9{1o>XJStLI?nehq0`{!r zKuYo|%N%5#89n5X6c#{29ik}-RFc(6I1yT*7rYq%%*UmPbT2y<|NW=uX&2gsG;13{ zr6D)mv#)k9Ov3a~pYx^Ko?cAmH7Tu&S_P#Ik*T@OyHk^mU45NzB2;Pf!96qD{QkB| zXm1Vn27f)%qRo-0rj`H6yEkvkDB|?|pZJhVE7rKVp#q75i-)2N4f~6ORK5HkHpi&X z%70T(%ZWcoUZis|3l$qusdcsp^V!}av?<@Z|0-Q$gUTBMlJ~zL(5+>+WLTuWB5+S? 
zP$Kak3Cg(6_Grc#vkwWbqtE5Vb-LkQh~ZvD(41{_qjAJep6VuC=4+=2K7p<%sRcp7 z)8O^`yNLw3I)V3!M)x;Ow{6q~_eTdk&j*%!b)B`08ruib1$j+J&n%Zxh`+_|Q^Fv_ z^`Q707xp!sHw|i#XQ|P|vu>;oqtE^%u4QGv;-~Ml+{{{1Vt4bjoT-=f`)J;S@|VuT zmyMV(q4`sBj&2zX7FPRKTuM{~4q5xPWrac>mHQ>WdGBLhm*~;uDJ;L8N7Eade1nm; zvft{$RX)avVEB~{YkyMI^RrnXLHLkx*_^Y7g$p;_TFs~98DW7n-#MN-#F>a)OHJ5x zP7Rdm2FwTzvAGY#QXOq4Yk|!@RYAnz^Xm7AwNQ_+w|Nnq_j1XwT;0Tweh!}N$v*?lrPCQEf2 zBMsz=Fu%6$2?Fukabb3AcXa$0RLsdZ#5>>T*#3poOJBAhQSZTc$OtFD68CMoiksBAe^5Yr z$?z$4|1##qx|`UM%G_9^ud#9?AgZ{$v2g+%FC@q8=2F1C2Tv+JL-)W}# za+>WHczw4ADz1)r1?^da2nMnw;hA?8>X>&GMiW9KrV&Do#6}`VY9=}@&#>k95F1plo51jDlQoG5xdvyER(8wpl@duZG}UU;QiLhkw9f(ye9vW;OJZ zay8z!e5KF(k1s+JPcZGY?;rjj8naw6(?rvW`^IJJWX9MSmDCYxR%q5j(+X_ZI`GRE zxr`CpsrtPqFwVt_I6SLQ{SV|nPlWj8uvDK{`!X+L;AgT`g7=%OPpn%NHV6m&<%MvvvA^eYpFf2nLCRd|<^q$Y~i`pG%-V*x??=Tq;OsMa{%e0q!(Z--qly zo>wFNF(-eTnHmIXi5QSN=@^RoJ5iig-C=+P|kf&m!odrhC=|_h@<* z8E%~%tFzdkmQI4G6=PM|9`<(InkJ;Zm|DipqnEsl-N^Bs0M~qc(;fTJEnXoy0`;*(2Hb-I|fC}XCWPMMFQu4JO&ue?_Yl|nTD zGZr%D49lSa?k`N?p(#NNx;N7B*s=GWU2 zPv!Pzhs&7s+Jb^;9*y_`z7`Tb9OqrPmDP!4y6SI;i zGb&Ct1g&-I>@8kO?a2!_1+Q~>5(MB z{HINuw`VSnj90!%lgVBJ_|IS0+f{3=D2qVCP~Ga0`I;9m_0PhcLAU%>%j*JsLAG37 z=(#Y~&0hUE&Lg?w=wxl7&ngZE4(hn5j5#Zu1!{OF^nZ5akBaFL;ue)fmGyPV^Nm&J zluJ9Lo`IBqP{j1e3W)2^jukba>QH(l9Ww)G@x+_O7PyP6z;01%NK_|!h>Q0gsFHcL zk2HyWj$CHZ#t^}A>nj%}{^OU-$GnFPq13I~%G5;4^TT*qnaMy%q znz4a^=KcR)EYs1*+2#LcnLGSeE?eWyd(X5Ds&(ZzMZ$t&Xha+tOGz;a75alFXSp5&>;N(v;1b@TCCjK zyH?3Ex_&0+V}oyYy4!EvAU!>b7c6ak(dZaOUrh;gY5`lD=dT{!clUW_emzmf|L7X> zI&B9Tv@ClAhHib!8E@7Fq6OF2v1WG0wZWEB$7&2YOpHt$qch$Uao-ZrA=UXTFqWUn z<+)nZ((n21YVmzMd`vDz=-e@7m)jx>ib*ZNi4GPLTP@hFm4jCJJV0Vu^lwK8NiS9W ziyIpc7}A8Xbej0zd80)4qrOtToUtdSh&IK zV(TcJz5Lhhh?Htu(Qhb^^HI9bt*3Z3CM&wNHU{zC^IfcWG`fnMsf-Rwg5AnoujcH%HOT4N=-d(Y8nEAb^gJ*d zJV@lvBo&K)?={1cEoNZzfZ&Vuo-)K9U8)F&i#0Tqam=~h6!u)i?Y-jn5%6A@Th9j~ z2E~vEd)k!eYV@OYzDnM&!GsLLd`5b)#0Z6@jB{Qmg}8I^X>#?o4vv;}_-&?S^dYTS zJwZV)q`Tex&K)fH*E#l7@e1<(0%X~k4hwEFt-F^e<`R#T$D^{E-9<$_)@#KSxj&m} 
zGDfuV0t^=`iA8|yl+zX)8~INx(Jj=}=c))DFTV+}O<|HJuD z^FPME%ag&g=KB7Rn>hnQ?91=n+uuRLp9Rj}H-(?4xt}k_pNION4~L(Z+n*o8pZolu zcY~kjy`OKvpU3{6&w!ZJfPKh-v+#gp$$+b|fJ4fFi}HX|i-4Pd0SA}?=jj0_8Ufd9 z0Y{txm+b-nJhHhZ-m1f{%$o(*H+tPZXMH?w*ZkPiDRZj_Y_|xEj+(<`S^Nemx1C$&Ajg>+x^+7b~ zjQlJcGi9Y~EF)YtOUAIArvq6WZigzxEXcIpqB#q()jTe(qCUM&``uoL53UH3FcdiI+hLcJLeeL({L_Um3}@dK(xa1r@+V2ZZf3KQf=~Ot^_EGAf*|vl<1m^zO?-?SS zN*B;|dCx0A!^4p}Dy^>}e9|XO{AMTCK#0I&*TSNU*A84@i^Fb5K!z97(R!k?10NPa z+8bJw3U!ByrN|Q=5xgwJVGu7h!NHibdzCOMlN6Yo^c;FUC-`5bDbUGXROrg&9d*f= z;SOZ(ZtmMnCsK$(wFX}hSD`?HH*~Z|r3_D{v8X68A0*YoVfE!xG9<2f$%CU{VIp3; zg!#(JArpv zF!=P2*Q0kA|Evq1_UGgAwM)!HEV~{C+6TVEgwAsM3o4h1>cqizhwS zy_`w4_OGQlfbK0y+DM<-8luk^blI|xMD|fl6XA)<6&b2eS{Q*#W8Vfn4Yanx;!d1M z_xLqm5SL#d5cc9rC0uCV<|NeYxWR-Z96$U-=u=)wcSJRv;P5AjXn*Q?(oxqLkKB&$ z|J2BJcPocP(#xnjJqh=I3h2bVK3D*Lvihw5Gon#qs)r0WS$e`!I4J?YSngQu-+*lc}#>Yn`R=E z!B5^H{sFK*v?#@kdyPF*>LtcJk)44PxRxh+Z1;sn|DiXeJo@UmSGmQRr^NmGOjq89 z>??Lg=gOkVb&=Yb`fNigYp(^gY3y7e_&++^Rz&8`+~{Ce|Ng=xrccDv|of1|W?+KI8-1*&@`om{0E zbRMA3BIR4|L+J5Y<^p&PT%#F{*ZeX)cbcx91?&9E1jRK&85 zy+nIiRtb8v6M{yGRUF*_ycv6hw3OtJ2h4Np&b7^sixmws(s5H32SWt`RhHtQDE&fe}+)mG1tOI$Gqh`S28tjdXdKh=bl4t0Y*@MM`%^1u943;qgqP8}Jh zs4K}Kbf{ci-A;76ASu-YC^6dor~?wgKP)uGKyh$dUgK#(<0sf*-w#^6@yakzWg;Ib z!Xn3C&=U}vuqT)lvih9S*OOxyomyFAP3Yn=RLQAcA8{qZ!jXcNyw2(+bDKOoZ19mr z1-e!o`ZdjEe6TN6o^zcpZ-MIm2e6(cBl(kX1>GR1AE?L>b9Vz;Ud%@HU0ZA%l>R_s z27o!1K9P~c!oRDl*)EiPk9AlbD|>bba7O zg>1b~QF$5D?J>AqPE^*yAyJqr1Yi#dl5wqKd(#u#~d#3!(pqtwKjzNUjMiy^l zu=-hWJU4-#2`UG>12ti8C_*Vax?+-jtwB{F_qXlYtKLS|er=HJyhSly#x`ddG>pnY z*4G>p#E}q%C=N5%cQ<{HfwM?TZ68G;wIsU!;LZa>+krte_~i+zTw{p#ZQbL^S|={(mdH=T9e=(mq5j==Ej9D)GIp=`j$@ia-TLRdJ{v6uN?SVLBbmG?!D^G$vgV z>`*R^n!>K0)277>7a7xFK$-^2%bu}s(o`7;QDm;uhvT?e@6=@g7GSS#A7eBZU=3pm z)R2s1CkCK56ldze#fn|;IC?l$o8bm>jz%5p3h|^^S~BYv4R_WD3QW^kyw@=gxUW5t!OZf7j%>*cV7ZV zbitr(Mb5fiQgl;v>QA(!Ru_g!-&0cc>qveRY{q=cd)kGJ0|*6?>SQ3}-NF+Hk{hI! 
z7}5eE@p;PDMCMD(TvXVtpEI#(p>=1UW0ycy^8_vnf)nZ$Ap;{}^PzJchfZKv52&_D z;q$bvGBqeT5G$kOp}MwYv9H&D&V{CEvomAP?CQ#$GZ(0Yb7^DCt!6|}ohhHINl2*vm>cCKopi`uhXqzlfwHd6h|{=~BZlB8wB5Yi@u$^!|>Pp#I5GS+v=+??)K7xqW~e7%}q(v1tqkE=tCd5#P8^l{W!*CEOem-(;`x{s*ABRr%}_=D|` z=L`|D)$O9-kFp$?);_1riaMajS@ofpI)bvso{}_41`s=V)cR zx71bA1fzyb*ft83E6GvK>aFsfo1v0S?|!Lbu{u$4T9$CEc2%e1 zHI-2%vpN{WunMAM8AOWOyr!?aKfPR@%;t4?aG3Rv|Ilj%9L_s$S_#i}hYgc7x~RQ6; zAyRCr3T~rWYS9FDl<&BGTxAPe@MD7ee8N{orjAU!z4o_oyCBx^uQhtsCGmpe(M2+q z_r=?@R8n&k*Kqst@{-ass@!{#V__L;@Mt7S#KzJW5joVQsw`_13#3}HF+XgG_|AfH zaP=kk*%Od#9?#KFl$Hx*yU?`g-x~01E_#o(dSR@zU)JEJRZkNwnV}2V$!AJIjNvn= z)*L7H%L~rf>+-G^LRnO1^|DfTx)Z4WI$gm66j*)Lb3bcg8ME)Va3*5jXLI>9)5_T7 zZVTHF@4}J|r}~UYx5bcrR?4dgag2oUN*AjdM;D58*y72;E|Ab6lZ0HGkAzef6&Yh| zX&HJzB{^lQX-U-gxQ%u_okR7ccBa|15Ku=BGuL(&I(6qf1x5hu9P1x(d)d|Ow2v~e zo#5oXF&7j5!zH-%oS9q}t*H;aDhqfU+{R7w#hwNYQj#<;VRqdL$6ii)N5hHQK8?m6 zCDOWzvrxLV_PITf^tfi=4o4OH?N}dTP0!V>o!u38^xBg+Sy&CWR7-_UO5Kc2o`+x^ zxDRuLeRD9!c%wQwylwnV!u_6XPeb-z8Cn-jag7U$)zDUv3lCb~<5rO#4Ws>hO-fn} zHQu+(5GZXJ>-UC&=eMrPlfSDuVJ#~k8FF`W2DFO7V>c5tnrg`=+Of6HLJbLYyV@#d zywST&XEvrSZr1I4B3hIQXJ}-fvncH&rUkV8h*@^fFa8+3+{Enj>F?MMW=K}IjCQp} zOE%h55m_XKB>lV`8^*@H{T4e6dWXudcj8vp(|x=rK^Ty2ynRGTjBhfK^I|jmkP2v# z{L*cxa=NdU;2^j~vsAEEEWGbBGfMXP{1e&cHhE6ru`iiCAWDIhuTBA}|N4wp$3Ba% z^)@x$52ipnT7noGc^5uKvkUO?vksO5#r;wFpdqqa@4?LDT5T4f)uu8>=~~{b=8x2L zgDV<4$dnoe=mrdlI_&r}fg@>UHS$>ZA!NUx9?{K4JO?9rgAkmmPzHy3C|(>;WHa4v zya{f;A&O)g=6T7lr4cF8*S94UN^KrgJAzb%#sG)z-Bzagauc3s5D$e*UDC|Z*kH-V7fwmTw3}qY+mL2`*XL4u3>aMo+>6+gW4e7 z)Pn;@4xX*17u$PPLstmV?WU!svUP5!y&VLj_ zglv2V`J;(pUX+YjB80V$FTPo0KlKIhHtA% zIaO-nnu10x&SA55LVqC%*fglfNtfUBukRWEvv<4!Qf@s2&* z`k_Lg#Rxm~4zP(*$or^u%dlADN0-GT5FaF4yl_8(C9qw_<`}Gc9`V7~n<~?&po&c5 z`$wwCT?lUt-N`m$=!D$a_%>VeKAXIpOAjA~ltB@ou(Tgt;-``wa-d?X^SnR2YHv za^=%vY29oK+M7G$$VCmx&|hF2?Dy$oVlJ~MRcYtCF#(lhw%{(c&|()9{0*exmf?Ra zJq%koiDLP_HMpU9pEa{zM%ZXiet=bmvscG%ut$uDvzY=Wzo5xO3RW1Nn?Zk~3{Z?7 zE(!#@RbIgkWlQqoNb8GIkG#?`Dn6}V`CN}O-cOJvl~n8b14~hlSI!duju=+5wsIq6 
z?T-KT=C3H%@noD1k>sgqI%w%_&$$(MV%+hn%!E`{)AKQ&FU!u($Fz0`q{->dNLyzA-i>;5cS zf>9rL5OPjM)PUIdjBAiTh=4fil)sN9wqdL$7VB zwATaW-8ESFWml1^(Yqa3E0tgZ?|;ERs*Z^T$6`zjv#VSvvS7MVN5kb0BEH0kYQeI% zqpE*Iy*SN_7iKlq`ukJ7lU^2Phs*|``0wesgJtLzg_t+XGJ`e}QcMwOk3NA}7+Jbb z?{}-ChI>1kTeZS#J{;}*M>xc*yuJ>mAd8tp2S(5KTkhOj&>pRf_RKjp1#9D;S@ARO zqhg;BZQ*!#uD@)=lqXxXum_x@8nbn*Ay2CLM@nclmYDX=;HiBsYS^r8gHUY7(+A^r z4y?|J$&wQlwH5yLZOA*>DK)#Lq%S?tsnp`BRA=;yo5*5!{utRa;g+o&OrM5aqzGVa zi=(?)P84ECj)G3{7`7-H#p>%=$e$ffDbO^?hD=xwer4<~+0N#cTb&7)>rleyy58Eg z$B@(GzVNKd+M@EWanmws>gK}zKqn3p~^|3>bU=<5olLhR?;h6m@M9tIGL;YP@7S+F!)I9R{THTUJ zVM(K)3m-wV=t_z)&zbl~+l9(sb4I>70|>)s5~Z{xWPv=Abt^gA6z!$-&Q8q3ieKpG zI!Hbl1MJ3X2D}p@Ju@t@P;(?8e5vxXYk!4&wW9z?y$+^ALcATLA+oyQ8xc_=cSpXz z8TMBQj5kE!YrS*H%b|RaaH+OvR$j{1$pvATIKF!ToDRD%nv6t&!Yi_uKDYlOa=T7g z<)obs-q0QUD#(ej%Bt&RQ>$7uw74e{cJsw*`!O^4-c_hdZ1Ab?a~J2k3n?CaT3eDTX%&v!>b#spZiw)dvu#N(W(duaO0*P4Y8oHK>OMIPFW6U93E+^NV;i@Xw9| zs4i+M$gByKW^K6Ex7^fkEtkoi4dIl`pq%kvwrvTDxvPtVMA!R z`x+jdyY!kA=32t*zpxzVh!)XfHd;JsqJGxxu;(?f_Q})g)q7r6{#uFC`qlc$YSIML zgLqZPE!&En&${D(J3cBESnc|0|oS6(|7$zIdr#Ezw+}a%zEGkFp z(qi|@tII4tgZCK@8K5E=v@Wx`xSxxxrbT1&<@Xe-rr#loz3O-Fu&REbg|fF%?D6=xcRxyVO0buef8o6 z+^~ZTKwyn34pabb1uB8sqFzCz$YHU&@qrX0REvRivnX37G=oftC$d6y2^fslucA0h%Lr$NXFT`5p7nL5>^ptLA`4Emm)~zT1&+(So;G)z>DGI= zVC`RVEX(^S&)@GSBBHOalpu#b*3Fm6$EM}{xTNOOc6D5RbR@qs!|Rf#;v};YmHk@f z<*+zmG7+U$v-O_o<7tF?2_!h;L@|$QY`Ht-DN>4Q?A*#71se7?(wglI@ZWc z{@m+}lPw|%L?0JoSie^_bKPTrlsb!gRc}n@0_!wJ1G`%2IX#!#;1uplPq?=k6ckc( zGq8J>=%6?P@F0nd!5$@6sX>hb3JJ*uK?ycmdmvopJFVy{_oWADi5?*v2nJFZfCjy& z9%QA8Ji!20<+1_pWUZ8s604ApYLHSPgFqcf?1*a#O5c}C(j*{=F{Br*i#EEy%izC} zl}#&)ODU&a$<*F?HjgLB&KB%DilX(n+3dBr?W?ZqGyk0r3R%-04zKI>KmLO&3A^|4 z;?ei{hPvnT@TtK69rF7&NBDEr@Bfwj>wg-M_q~_@{q^gAKV`uG`c?jW&-%LaJ76LB z`p(GrHp=+DB#QX^*zfyJYw*1$+4pkRFZlB8;G4|rGv)Qqpy7Eax=qhFQfB%X_57F! 
z9@ipmTRCjRSQcUWpy;xL&-}msmMpYOCFid@FkbcQbt3>zV76*nehMs=pF`^b&!a4& z1WLsmZ7j~QOoRVW)ig=M#G^5$NJ#WRbqvxU592X8*z(# zZHrA0ue!?xcW$f6YU{7^sLNz_Ze-*P*=VzVWsg?WDn*K8T8&7h@&ioRvsh*HStQ&K zxfn?j(t|@xXsTw6)!@3xgQ7g}@~4`qbOKy~AzX0;T@xDv4qg+UthgcbP$Ctb1UE{^ zDAsf;T-4a$w@qA&upfl!NO=cV_#_7eg&_?9g(AqMd{`vs_zEiO)JL0WSs$D<;=muI z$DKR`(&p1}5R8+W2^4$8kr+hMSTP`m0+L1Nvp{%&3N%2riXG@L78YuSMB|lkD2=yf zN=62nCNhUWfp?+izH}RLA6al!iXvhdiy~?@kLj>VN{}2mVAd)bwe^&{`=#UnK`M&c)z4hr+1kaV z&n=65Bj)`t)XLVZX&sxAQ>TK;K_wfrV=(3=DKa?RN?=9Aa$y3-mSJJ+NE0D#PInF} zM9qfRl)0wtS!ROrxG^#CbIZ9XGhkkObL5w_gdbitfLb|#Jj;_@9}VoRVf+9C;StZ? z8Gwv-VlqXcFQduHHU!D~))2u&n~-@Cle!9HTA}Skm-U8GNfceyx*`lr&9_Py^XFpv zbiDAR=4fGradw&uJ=<+b)2;2$X*!oWy+@cr#LfG01imxT%p6CCK0K<}n*y(XK-QL2*})J$9Z3uMNU-uq`R$UuOY!jp3*(?CWfEY- zXDs%XiDLCdE#e&fmf5VsrIYct=!HyKPiwNncFr)Jj@Z zY^ojxzyq7B6{2B3J9wACc4^UoN;PUpzHq=?sF0|3T4ZqYqqj3IxS0xTzS*`$*LBAV zvR0MX@P_j^AxpNbR;G&7oQwxW6q*%T220nZ@TQ~LEFO+%53Y39h&5&1SBVu3UhYPX z-a?6=W#n~Ro7|e$VIr}{OC%Nzh0myHQ+6l<9=_;rbGM@7l-k_rO?2~vF#ges)(?o) zQ6c%8^AigEl+H6Bc+RLv7rnm^h16vOFkAbW!-~6ho@%u$OZfy*)LrP18;MNyU3)6b|Kp(=@%L3;a9|j$B}J$?uDO z$FL#iUxHPV@eBPpqTx@jU|WMxWYZt(I78IB11ZY&PwqwV7bMB9L4kopf(AtA63i_T*vLYTVp*38roZn8lPN>h zQ{kKwrLUx^8NQpNUJ&>qm$VG|b$?>HIHtG+Xn~*RkLhG61TF;}J$gsb?kk0d$DOi$ zXIK69Yih9baa`3JhuDu?+B7webB1*>^4$nK2(Q~JeB&nEuQC#meA6ajB#}qGNgmFr zgtUMsQB20Jz+8gow618)XEP687MC&wu9mUA4W~_h?zC1K3NMsS5%Yl$sjZuFZtE(8p|TrAePg%z}rKAT06WB1tH$E>=KZsB#u z|MI@Wv3PhSs99^42<#WFC`$-Op@8M!V?qO!;A6r9o#1U@fl+ZX;DEQ-nb1MX*c(JY z-B?-{70F=rDPLW9o{?%acw8%-M9H&N^$=QOLw4NIKJ^h^+&&Sd zuLaq8`G?<0yWRDAUB+UEmAwdEa!9GdoyA@Vv3OcMGJNpuilaEsoy%`_1gA$}l9w1H z-LC{Y43z2RkC;YIfMN)+Hp#g&p8v)jorbd_+%zFvuXB>InkUqgTjtoh8G@4W3+RHcnITGRR>2=} z?x&H-(x<|rYtz#TkqkGwbpoy4x>N{mXC7i)gBxdPn<|;=>UDoAhmu3lu0O_?TsPk2 ziTw;JXO0r}XllkQtBj9?#clxRq%rPK0~`Ze36x^wrW}nEGr9e9ubXmdVwgJ5-ffB? 
zSnDDNV)tsc;l!qR+l;d-luvyLyA+wqG9~^t<+1d2dDz-Ggx?V%Pwz}R;Dm)+r&Sx8 z=2ae8m9&&LjPH%{pDCE~4KR9f+%R1B92I!J_T&(SDgz8UN1=Z@jR-PFw;2kpsFY$9 zC)6b7u(SD$ierp-bm)e6ySm`250xio1$~%D4nN&3kzs&I{*vNy?ujFR6C443noLS6 z>gGH%i*Xp1>FW#kTe8}1sbT)ygc_ReHD!IF;eXQ*Mf-vId{Tq0{z(W@wMK$o5y6*V z$IoanZxgsw9SaI^%8tqTJ(m9wudR1)pr&yL&=EL;HTA1-QZg0?z!7BYYSQWAZ{uw{}^OW&ex8PHut zno`W6XdGr2dZx=#i5yjz-?z8r13-#DNl+4Xn^kdEBc688orK#dcxrH8M!1{#Gn3eg-&c^W7>s}y` zxQ%F^_T16xA4%8A!_c*^1>se`8z`#25YJ-`?@5sc&&9g@2CXx=$*6OuXUCOhz`4z~57mC}F(kENo2DFI`fS3{xWHpb zk4`@nBk^f_<$MtxlvENYZ zZZ$euYu221P3=A|ao1Wye$@1h^BLVsA#AG#BrApbQCnNIf`duLTlYI&1?sRrb6`TN zYmz+@1U;;)a{qsyT6&Gqf6VUEg-9iMpij6<`zgqtvl@WquN6&60O5oc_wV{4j3In| z&Ly<)Aa*-4L4r-r2Z-u7at$%n7k-*|+P%>cP8N+#slW`3j-t?Ff<|i(JL&h^jyKGepEbXg@Gn zeU0cugA>dB#AEPJ@X5$kw|1ZfePSo&MXztf?BWvN)Kh)x6`gc#;&6#CTr(qbA1*%B zL@CsJg=jb5Yh}jq;mz9?yhZIMIGSjFVKZ<45$)ONoQz}y2$&+YQd>H+0&gfU{|X;u z!~b!2T8FQKO`87Kp4LXE>(R$9vrbQ+X$PB;n)>wU*uSE?E1lqxTWkp6$<*E2rnb^f z36UrMqa3@qa3dp`o`w+M>^o#j9s%tbU_IIPL_M-qtR2jo!BZv*4$Vx}1J+52B8#21 zDW}F@Zd8XM9$om;y%1K@262IWXg~k~7*s%w)%|9kY+&WtL5=t^NtA|;2=0k~Pn3W; z$1I*7MVmE;d8i^Kai?)xATwNs>z5l8`&A>OqAP+#z64KENU!(hkW_ldtI0*}BoBXaJZ*%0g;sRI* zVDbYj+Ac_lI&F`EPFk7)FkP3+4NCXACJNxuW8hAuBugI@Wy`tt0yobKqpgPE!o5M!TGvfn zcAK8i)`QnaT|uyV+g=$64f+v@3hXS4x^@HW8fYdRY<_)-MOY6U@f6eg>v=dL;Kh{%#^iEiBw8VIc5yM}gk4rqele;QzFC_DbWY_1>`SqTBdbFq1{!0u%d`|shvS49#qm(Q5;?KW13n+1ZpF0&yt}vJ6@!yp8`yB zJC5R3vb2&GJIeSRjr}i>)P?m2d{0CSd_&AkkY5(!MVWb3o`6VDS7evhCJb?+v z6L>j}0o3yZqAInwNY{`n&Uw>L`x0!riB!31y%foo;$m`-D8xQf9AzOo8>wnY24=Jw zLwi8$W<$bs7-hoFlIGO-A^S^;~W(gZ|3&IOU=a<4b zRYiOwS}j;}L(h*e?tH7c1dy9aI;8>9J`&v#klvp7k=RZ5$c^fP{NugoqqTrodvCZD5K@MQG>~!dt~P9RO0K!Up$@J;eaFD#-cwTbGhu z(YjjD$g~+)OO1M=1j4tkk@yC)33feGfr*W#P^1*S(HC@LlaVK+rZrYB`FR|nyREsA zkwIH|`nHW})Yk-&Z*61z+J+}`%ZCS-SD_5_=2lB&{+|HR`Yf2>9-#Q%6LoORpdps> z+#D}6ags)NT%IzYtM2pY!F)<6y^lip@fdE1!s9KCA! 
znKW5I?_Bk<(rM)ZZ!~=37!=1H2C$)Q9=g|1u!Q9$b-A}<5{}!CHCV5I*GuT$7nfcw zKKFq;A8t+Pa0lB&>(-;%`2L4Kzpk?T&+$aIyQse?&vWXS>(yD=-y280a{1dSDa6Mu z-~OX5T)fx^c^ckMerF)eokrzKCSKN~<*}$b9##ML7u}{kZ-Y_yQ!+JyE7JBtzQ_+# z(PlGLi+IBP^jwh}BY*fn{GrFC2b3!|HT$^FcP?na3cq(#YdMY`~SP(KRe zJaeR0`?|zkAYOV;oa(zy5ivMDi)ATex=}Z`=$=F)t}xnr&cNlSFL^D_pNFB52dm<2 zMn%_1`{G!~n39BqnlttfralQKXde z6n3vUW0{KXtJdGDGn!dFv$~a_OU)l1^cvxPwTfDojx(aZ2 z3vj(|r-UDaby+cv(~w)MsC*tTsO z9i0BpeK_~rhg)maT(ztAeqVFWF~=t1&q$|e#FF#tXGJ+s*D~5om*Kxoii_V+_a!1O z=!wgh^FP$}pJ5>6*E!)Z(8(dahsnBl-Zbfe`vZQ)ba99S{+hco!7crS;qyz|e!G$z8B#4CmP;tEtX zX*pQ+jl%7gsXc`1tpQW$eEC}mI2&T=VHnO@>6_Ma(Qq#(C`M=p8Lt$7s{TjUGcL)3*XXWj|=|FmP1;3bTmGJtI(zYODL_H}cS#F*5I)Q3QBTZfk_i z&i=ClM2^7x2q^sa2o{x`_W)=yvNJJ7s~dCqRVYd!%2ADGAF1!GcQO86HApvCi%_Y{3GkKa3gcpM=~d-+n?6ged73;UJ|+ zUvwH~9^1kqRpp|wgQd`7x38`|-BCze*0XHEIjJ#ve_SuzfXn#PpC`XTy-vy3$A49} z@GnK#4Hayzo3};T6^UX@9VT=<%7%#9Qnep>2Ch}|ubNkIUj0R!=f?hi z%*@eFx58OeYb~P=*`_N-id`{~{O&1{yWPSqm9!(fT5h8(G~tli3E`5Tu%U!A<|I%7 zk~NaXh>#&ZJ-KYVSHUztnbmB_5J$EX1${7te%uanHJ>SxEHPnARK*A854d_CX55QU z8RR#0{(|`xUEu)N7>ll2N|F}zUN;Hx6(&{ylb|!=hIB|(mGto_jeP;DsFk=zFkZc7 z6$bfG(PlSfIZut4fN`8p8bLw`rB1ubk2Y38*2`7j} z3vhr-*f2s;Gkiyh!VVCKTqKMxjvw6I!I2)m^My=MqED1-%qsdFocgGWNF)FH>KoI` z{|x$?p>T6*YBiUI&&IyQ5ciNI4m>n1?ZW0R`bmlV^AJQH}9K#sH7Ms#c(F}G(w#59uQx0 ziIirRE^-9hscqs=rq$J{d~5RhQ?cDLUjSz4aJd zLU??-1Qg;+H*oAUh}i$lL43#L+qppMsdYUIi#-8$4!u)A?3C?pAiGUy_=u-&vEUms zBD6;rbDsIZG86>Vf=I2Pt!6P{F-n|f3fq-H8B~-})mEhe z%FC!caFeB_FguBGvz5E)6E88X_OyXLzkhTm8~R3r0@X_FBRR|bMf$L)HI@u~$KkBx zK|*Rt?lA?Q+1&4w&}T24mxa;eFKF8MEPUbn?FcU>&^If1UB=&W%cqd99bX?%;!yk5 z+57ptJyWe8UYRs=wcDgEK%iWIunbH0+jrAY>(6*mN7h-Ng30udWT1e?Wzcl|cPwp=$b|+ZbU#bVrU$ zy5R1LE2`M<#gGxg=F+nfj3AS`^h3rk z^zr+ml+d6jVxvoGtr`jQtFDi)-%BYw(Q|pgWFj##(7&bTm9FydSpq_)0=SD>bZHH^ z=jrEr)aH{(QSqjEuH?M6gb(aQa5$0mqIG>*0)@MMrrCsa9verkX$j-h5r_!a46=dJN;=tu zuq(H7`{VneB&rOi+Vt;RysUqB6Lk_3omZe7MCcMaSsVYYUSCBV#PDEy;1olgy*k7L zCVy5=Zf@+}L3V?HohZt->80=FiqDAi^SN5;2eB%7QZkl45+qiYM!hShE!s5jDDfod zEfUM9+EX7Y_XIf{P0WII_1V_N>Oe 
zT&5>hu6OoNvWRXU4y@ll*t&4xF(s+MJ5N5G(XYEMkIj3iKGMJ-SDuyR`k<=W!$VKalpa9%rs9B-hZoK96soRzzrxDjg&kFDWeN%FKrEBu z#2l^peR8nR9w^f_w zY-gFgW;lhl-x0iLv^B1y=}yMT$ywYH9w6N#h$ZnK1*B7gk|TH>Z-ocw`oONn?ex8D z1a!X<8xZLa?3A(e%*GgXO18OI1b_6IOW%HhMJuOJqg1K*!@^-u{3u7EN~S1MK*>ahV06!Q{(m zNw^BdqfxYK5@J=c!wrFWD#XJ4s(@q4V>8>sHlcL=)BjxP?U?T-#)$Y4?p{ zG8#sRYNny7ZAJr~G|VXL<c{{aHVu-(vHQ-|O-`;r z!scyMpUM*!oyKB8%koQ>4FO(lI-gO$423%Y6>h0tOt7J5SEXk%k*sqBQ{aK%6)btm zzV(!IYZ4Id3Br_zNsEqPP(R+Hfm#;tgdG}97klfOV18c^v%SJeA{_uP6Q-c8%qAn% zE(C*#$_T;!(*#b4lh_5Xa6W=c`|Y=9za4}{-zwg;lAm-+r6$G)`9pZ}#p_~x@n3N! z`7%sKOn%q2UA3q6{;O<8u5)A|qGMz`WRgqYd{o15!eNSStSTpqHvOS!jA3O|9OnEx z3N+>nu(66}b3)PmPNZ#%b7P3jSo8dO?+}ayF&Hig*CrTWLt2(hxmEo}diX+Wve5WB zkQe5`CwjBARHwMUDY=syeySUCzD)zHlJhY{!C5YgJX;Sr zHV~cwL01O?!8|`SifG%QV^ZgGH1Q5|S{IOCeL<8I(qN_pR*1W9cx8r0AcD*zEFJ7{ z=i$L~6i|4|gY_9RdgAlnTFh$zD!lm|Ek$^b86R*3#oJf|Zx!I) z@*m5cym4d)lWT{;%DW>BBj^b{0w4Z4sm6pX;saMu`d8ddt}FR-P}DOi)g^q^mU3pfok~Y~c;*>7cCffd$MB8$L(|Jz?XG*Zw5=SQPZqY`jVAWi%}r&& z8>^dA&fTy>?6O3&)G1yIt?1>axd&%9AulQianvs3_Zc+qY$P6z^h2KaRZ{2})*LI^ z4BRm?0hj41Pai|YUP~gRT?TjY4kWL*L+uN{HQ6#7D;oV6O_7v3+PxK3pH=IC2jAI1 zRN0f_Rcy@;rp#xjB)64Li z-TS#8f&6KfQ2wIhMr#951hsi8kOeuT2hJ!LdKzF@$S1JPJ!-go&y$HnW5 zo1N3Q^Khb$@SE!s>h}-HVpsBTa+q4|_A*?{j6SNZ)vESGS7jVh*XrOu33wbS3M_Cu z4Z67r6+HQY5YI<|Qba{#(`WLJ*fQC;vkS;&#XJJEftKS#oi|29AbZjgS2tV|7m&jX za9I0Ri@NUQpfQY;>)R1H(@Y9-OIW?$4Jq}89LRWDS=vR)9u zbEUwR!k?(~*A;25W!*3~&W`u+b=?voAs?Z!5fd+>A1LT_StOuX_}o~Ac=fcqQ^>og z?G80+iF?YQ)AE)lVe`^SF-5;ksU#JhG4Xc9HdfJ_ZQ2fBpKUBh?fP#XhoI23JHQYybR7(zKBBatnzlzCfOmkU`FxXTBh8veSC0vvq`z9*?HZ( ziL@MP-ttc`E)M;}J)@nLXub`1yRP!JE_J!9yVa1>yD_4aTy{RMedDPhcxBi9I@y#h z9aRD(m4_f%$Bv~4F{-TTOkQwkOiW#0GiuA9i3HJZz^QiJG@S#n=tX7VJw{fGgbPDP zY;CFtHvw4r-6I^^JhbPBJ;Bp^+CRS#rZsm3YtJx56!GL6MIhuCw}|3E+Bt}o1j(La z4hhoTXGU82WG_4=gtg)!mue3_iPSzV1}gfU+wE5s)K;Hlj3s*81?NPp+qgpkIHk zH%eVe=CZB+#@bnRAq1$uEi1z?RaX0uRw`v0@u;vO$EB(!dqC4Q`Exy4YcoNAqT>Qg zGZH`ocXQ$jR$RSv1?IKE#823p9O#p15^0=<>78hFMq-~>#da;WakVlRh9SIyVR~qa 
z3RkzA08vXT^Gbej$$sr=lw-D*akxgbCp5jM$ks9~JM#(RPDi5@)d@9}gGo-ZEFR`g8zuB6eA%Ek5g(>Hax9Y#AFMsZr`T}?S z=JsrG#KW@mC^jhLX&hCw*{?7pQn)JVcb}2STG6BPgCuo9UaXB!oBn#p;QGT*#1h3y z9*7&yzfLe&ew3uMeI5Nf@hCpSBzqYs#wt{7qdUQH5W|9niFf}D3h-?ZCGEydpN*K` z_Pu#&gb^kuLYeIqNV*$j5@f5zNf%u=jm`Vt@>$Cg;7|N|AVq42!(K)Vd@>zfoHl06 z*_JvjP1IicjII22sVCGO*Bvfmz0#SszK!Rol)4en5U;(Nlk7CR7Om2)wk~hs2V0NS zu)VM&tlBuXBd>jZcthF>e3IYza=WcSK;;OF>u4;A+2d(Kn^$9t3!L4^u+p(4n&fIM>+(*}zH$T1{FG9hNnp>vs zxnX&#P)eOC_o0~3z0r05&}!n#Mjf@UNz0kl;nv9BI8h>abQ}9oWwP@6YC4*>QdR*jAB?wRef6q&8>D-l z!VK0VHl%9pUp5T_Kr4=GJ<)fr+94QEkQdOrYj4X^w^MVhRd>tntm+x+kG8pYq|Iuv>}(*ldXqJQu$!K@(n8~pgl`pEt_ z2x}nmnY%S~krtXR;7fhI7mO9`5B9$yelt5$>BxTu*-cIm5ZwP?Hf(&Qt!sm?sn(;q zW#``7`T4yd2b7$W0a{W5a!lOzGfd6gSduab3`TI)$4rJNZK^g^?b5bt&s9`Mo-6SR z#(Ie3J%ohl{86FF$jBf=kde9q;&%~h)Ije>!Gf@gF&Ml4yiIqvR<0k&024{cLNglv?oBV>iSN>|xJlhEY!T%3$p z%l5ifC*jORxFGgoV9Tho6CK9;anR&Mo1%@97*s>GkvEz~+g^K+<3e9eoK@OH=P>(A zIw=Q@g*V@9v}{sCrLo3O{a4VDd_msi#fOg5SF0nYDp&Ebve1m2m2Bi9r!0qg$+O2y z)^KK<6!V5L{X#SEd4Io9J+AXMsM^zOa@Ohlzu~f5r#@Zjp>M>2{u4dCY>{#{>Df;t zwED0E_dD;hj_$h&7=h1VWh;&~2HqP8q!YhlbOr7~1%Ug1qTF?&&E5e7JZdjg+b}*C z$Kw;~699tFhSrKhMwp<}OaS%C}36fxme<+RoaVB^=JQFaM z$E49hZJoP~IZoFp;ws63jv$n2U_u*sps=DLq znI>c|x_kN@`9)1aPor_|(TCBFL(?%p7Mr}tl&0S~^OY9nu2bkTgCzS$S6~l5yW?8} zH3tjcxMk)6sed-s)~=_@uC6S$C+pfu#P&+(B0hbU?;K)% zXZBB5RbJNFc?l2IcuZTq+cHu9>Bw#%^bTs6p~39|SDuZhi9{H4$IV*KXzU0~2f;`| z>r(WV`v4QPX3NXJS~f7~2I*8Iv}`mE^kKkv)h2hq3FBC7y3GPTwvySX34SO^aVkkw z<4@xL*!~}du88RX0z8w`cQ1<#^arKge829%65K94%wHU<27rJtNMsQU2GJ*J^&E13 zmIS_`|FeNFeq{2FWbzFk9sVEb#gkyqr;8ZWE}e#ro+xmb8Lp~4KRunhr_FV@4>tbV zHR7bTliCuxmduOi{G=!`x2%Ke*$D8o{*%e>aE#k1yzWnv%<4{2k4!~_jnSCMh#@74 z1TRlVG!3cU(~E^O0hPr$);pZ!PuY8)c6qd^d`X_63L@$u2SWZT4`OOT7{L6h1cKqA z0;2k=3UY8;1wxUmPhKZci~a|l?hc)!S$vZc=~zp6vSGid);d9Z14z2kFsk2xbkRX? 
zzhOP+%Z6(6kng?f#9K?0h7{y5<0Rp+Jql+5^a&{RXT3w9~G`ZJWciGo9#{`&%axUo`M3Yhw&VmZe;`^ zdR>k7Yl=9|g2Pd+lnTn5!X!K`UDyDRiLC~rcD0cW=f*)DK(*Q|sV_{Y8+P5FqE2G5 zgWx|-d~GhfeD!gwIgP`b&&&W%ejT3)-3ALc&keYuRZ!N5VulRo8GBX~jfB}+irT!` zZam*cot9H`i>Z#DvteLKUzYUIz3q-6WIY79rl3&B6G_@gYp@(87s&9hmx$NjgJD;M zucdSN?QU%{isAppY|n~Jttn3p2Oq{R8MG5NV11xI)v!mWIXy}I?CeE;lZ`B?l5XAE;_XY zmYPNQ6I8!PI(#-?g)i7%b7Ak}pzc_Au&x_HWx4}ax6oiS(y%U4 zGIPJ+NcR|zUJZ41+oHMqbNpB}5u(^{O-g=&+Xokv2ZvKX)Td>urrtbU9OufA@SOmn zl+3p6ezpp37qi1}S0Btw5}IMr-TSX@Y1o;bmSk#tP>bb5v6YE~5zvMia4<(hds%5V z$UNjn^q~}E4!4iuRLFnYLA)a@7EU*NyH*k`#^O72-Q-1JLzHCe>pm{B)~k?fJysoI_jeeLb5v>aGVXn*<5%u`^49NUeruP-jVR$P_zySi zOIlN2;ApBh0e+e-V;CFS)Lu8!XF8qg{Pu7~DQ_1OsnYs3Eu|y!`yB>+@q~}hSlo|b zCsVdK*}bCzf~yyvhLtOi;;=*Jm2?gsm+F*sI!6@Z*4&?n8jDNBjU8T-9ko`Z(N8RH zZtT|1Fazs}PYWB=1;YFLth6}2GRx$&_O_4pJVZa>rLq(S;wibeHG%tt?N=BVe-v}A zppKUvky0WV$}l%8^?Ti{afYdH(~5r-U_6rbw)P&~&l%eVy$v!_kKKZdu``=a*j@dg zXgzS#-3dfgY62CF=;ElzOpQYaw9+3PmTEf}_XVk5gw+1a@q}&q?f$sD%!;8GF5)F! ztfbLf_WsZ%z5RL1IHG1f^1M;2CS@7UCz4Dbk}Sbe$&>Qhif^vE*D53BQ4!lNoxpPx zolK`zm_>4I!bJePgPv@lnEcjG?K2&apn(gY4nhTl>qS13%61cr!|3Aem(>2Diqeq* zObgdHaTv|&9o@7sv*U1NC3ZD&-K--Ifp51P|y^i1ZS^yX6uzKC#_aoMzUCIV3 zOYj=R*f01PTF7?+rgv`gXg|HNLV2YFbfM{dndW?xc}ALyM`*a48;wWh5U`d&K?r-Eli#^NWaJnjK^H8oe2<3cKYHHcb7nX zb>=fv9M!gtX6OFe8(?843|-W zQs;h;UVF$XdM8(w9O07LF{664nMw)n?!UEg z&Hh^C<-*_VkB*^UOcX~Uo!`UW8Mgz_CXEjk(gErA|$2Lu`r* z^&^x+aYy^HydsE-gcf^WQthGKBdvkXRg;#*!{ZR@$G$6e}SU3STM& zxjVi(PND?d73w=-DoL^~4(kASo8`@Z8vZq9~h)^XbB`VDThJ zyqk!)=2z?2+S4^CS`=++R*M&VY}9efb5*P~;4KY_g~ZhXm?f`uYBx6M&?D7in#BF0+&=_Ea7s6fY02u&%6TEVxc)uUAl~ zX%ZLt?zGC~(-7xJ2PrFT&|9i{lJ@c;J(GwkN?i073UR#s=Z+X%KJF!dM`URO1M4JFKhb~>Q!Bo;Gl>yL6^L%1B2uTFiS2i$i7i**!`z) z`d~jIkh2(UhdnhugXEgbP?0O*{Npx()j-p+tO*>+A`S*ta~ zLTS|T1D+ry0_ZhhT+%}Ln%JXI)+ZvbC#B%0>Ob_+wzMf4hVYYz3}PTShQ+7R*`r>} zp)lYTR*|IsQkb%^NmU?Dk$;ve);0=)Jy^TseUnh~yyNOto=K;3km}*x9=X6_bj)aw z-q(+z68z>%*NQWE_3~T1a0AoS7%YNYgi{cc9haJ~RFjLCl7sHyV{Is>1b1io8hP7) 
z?e!zX!ko+g?Tk6aZEJ<+^CT3sA{9?*CdBU2V&e^ca!RsxEr!5WI$oghD{cl(*Tc`NFTErI&Mfv?Uq+u(Vml=OMb%C;9N>ZoWRzlB$dcW$ z<;;@NJq``3P;P$b0gLj|o+|x^oiCG=yhoM34KI&jsl zPtmH^?>dOI^A;9XK!|VnKf*0Cn#6ojJ#lMo&|6XdB$~cSO{UcloHhBA>h7{M$^5474Tn&igX?3F5EJX-zEff-UF{_C`OTIXfhqMJo;0&tSrbZ3(HUuP#C zku(YS$*K1{uml(?LA{EJ%kde4?cn2-vl5^fs1|I2QQ36pq35aLoyG%O;|{Gnn*>DH za`5hys;z~j85MaNPR-xH?#AXF7LYz}+6`xI{8FWWK9{V_kCtcZl1UHm>O#DDZ>#hi zk8A9%dxBAkyxh7rtnI6%=Z2>-R2fcX`4{S^r(Nm1qP_rW9Pz~0xItU8Z4yV+NP7t# zQ174tsdvA+1sh0(1T0Nag`HY6etn*61=M05 zXDvy;Zx0ccft%e*;66DR$8w@O>~> z2Rp9ZKcxxkgP)W zFQ_sl17a!+s0b>`{}U?z1swH01s;}ypaK^YB?#fO2t?oc1WUq<;ZewN=nEZiF2aNu z%1jNf4w}5;AxdfoEdx^sNVo5>j}FhMYgqF;XNz0c5%qq+L!bo0yo4$vp9g zG3OF3Ez}Y3MVl|RFq==2%JQ)^<_QN&!A_Jdmz+04Fw~9_r)Y-P;99{;(5QeF0Bh)e!vCrLv{-C6l+n{2i$27zy}9 z)~d--pLY}bX~dlfVgy-eSovthW_!hzFAd;*<(b@@?_eH!NC z)LLN&LXHVDg*#Aza$AZoRc(&$oLR;7$y)4&CZl>zpY<4 zSRmu~SIfi=5#sXl?*EqjjKS>1lbavavb(99ud?`^slfWq1}}Dhp1I=ViLC030LEMv zoo^i9ltlV3xlvsaSpT&0Fhs-=pKZiy*IN!|rp9gw81up{C;i!CVe z%}9@-@JZKK`Ec&*)S$jGeZ?}TMoG`BX#p%~dgcTVgnDMC$a3*Q0s>RpEKR|3eEjq1 zGj5LYQq10*QoAM^2mtcXUleMn?T7so)X|?#qfVr*y~tw8VH{L-RloY_IYGX}e33$~ zuq936rc~0*Q#YXpM5`!z*%-U))b|q(Sh3&9{u-3?|Hb{5k<-oR7fLCXkhkrJEUMT2 z=HS&oy_5HG`#dEL_4=gkdlPq4KFj?1Im^4TwiYYLUvi||l&MZcYSCf- zGpv(F3-gVA6l?tgG81l^UNXC6B2ed)w^yz=+s z{c^04AJt(LM#D^%04`{E?CXKd%eOwgV#{!jstedKEz-0)kCUfhAD=`1MnhuH9GPKpAD=j+$0Jo#P6 zE+{2wz;oisfk_wAH{wguo%`Rhuh*Yz{_kvS{?F(pKI!=Qe|n2ZmqpC>22REveo?`H zBmU@fH>QGz&IJuJhf=^s{?7{oU(@uaj1!43e%hvrNS1cNUap!|;%!wm8%2)B`Y@EFUZvqS$s_4m38c zjM@Y)IPmuv-03bTf#y2JwDrAnay)f^bbst-k;uC*KLC}~dDA95RTDIWu7-z(b`~Z& z1c*0y1nN>nbse#We$T!;v-yQ+6sv=z9BKOc5vrfv)1ag$Qso&z;A0T3sV0jrST=Do z)3s)stKxpT_G}hYnrbEoe205}Ve3$NO!8nr7j0Dc>_K~>{#fQlMOOY{%^$|1Si9gY z>6f;UHL0r5b+TQ z-k;Uf^f0y%oo+%L6Aw2*U^>ueXyRnTMOS>pRo4tFE3-K?jk^)Q*Hg<*DW`@-H!Yhv zwg{HK%^GSU?`$#`r$%KlHg<7as9Rk1!=nE5rBGZ^ha4ZTY4V~~(&e4_2W`ksUgGJ9 zyX4~+_Lr~1yK2R@6!!Fqp9{qNH@<0mllX1SW>-07Z#tY7pP4gvZPDx 
z0)tmTswbwGM-Lr?07(V9cZ}dgv~)dH2f<5lN$mn(R2xdRasC2`w`R2oqQ3H8B-6WZ6vt;?u#paXywU7H3Sv4(rNpZ1{1hWh2Q|Ye!j|A}U?y;Y@V7T~5 zsRiMo6fqewx-WzO*o#_c}jAIjNl}s4@0pvZ@~a-l)0C(p&ZSM8BQNyg37JFnv#40{{z2tRz`^!gg>HZ}<;Ns+a9g zDJ<(Kn(N9rt1s?}i@%1FFcWR|?J7RF-3~%RrSDQ9!@_kCMNekqUMM#P`1QLiHR+y? zjqKW`NkhQ&C)FMa0p;w&1;O&tt-nw%qDX4dUAe(P{t(6Zw#Lm3gf=p%43ailu)T2< zONm(;wx+y><2b!0te3E?ONy;62;#tvA8(I=s~vQhS<)7Jo=w?lE7WGV0SOqHJlQ7? zrfNH}#XgMDvU*U)knn6>zq;X{B-rL~jrg*JB7d_Vi^3kYk&8#;X0r4?hWcDxojpDy zOqClP*+xiOciQ(Ik1~Sc?miwAg0t-wZkK)^shaCP))p+ErK!HUVeEnQC|qvcJ>Z$e`z0(h>5aFCNUp=QM)~K|R|LyoO=}oi0&1>piDG(flT&<1 z5gc(Wzkr2fk}-Q4sts812$9+-(`LC;%Xl>*Qd=wey|vk2SA%5u@d+dmriJ-Llju4r zXE&$05k}(;oZT1e26;q%+PtU2%;A`HjFO*?$h^L5eyUh`WswoTF_QNBY4ShZaN`{Me(^s)H;8 zckqa$$3a2@019u)&Z)Bx=#(sw%P0u%Vueg6W$&l{Qz6uyAWY}%3z2kfBhIr?uDJe49DZFCUeyiWzYBYM z!EPrSY>3K-Rn(Ru+tz7mWl*JZ3>Ub1O$$iEie83Tl@ixEO zw;y@v4m4K>_{t3If2tY(88pQGfDE>9!$XY@xjAGrVKyZ0VhCq7`OxN?=?*Mdp5tIW zzl#^c12?d-Z1PeJ6;F`>z1mhend-`1ajhWkGi9->|HB6oj&p=wF9pZ^pFiGNY6L+h` zD6;j|_GH+$LKxaR%O;7QJa9%Z-2wNnpRPKn*|Xi2Jv(xv$~Apy-2u0~MKz+`Z&T13 zTwdzd1!v=yu}5u>k((wFpO0VqrC7BI4gu7W$T)SaQA%7v`2zzdQ`>U!f2Mi3{q#iI zd@BdwovHB5qoTL0(b6@hSCP?}_%df#b7}@D5<*6h!RJ^a;)^35>;|9}WdNiax?#|o zUz2hLyWJcpQKtEZ*W84%A(zu?B*_xEPVCTgbV8z0TsP@wyF)6a!RPC95Tzxs`ZC?6 zqso9a*t8Y*s~GgQvLZt{VosT%Ubf$>!ggn(9@L2zUz@1H1Mp#T^|x;|IG7nNg96N+ z5OTKtma9&EPy!kh?v*E=-MX>)tw9C(?rlX{f;Y;qy51Gpzmnp>w0FZHKT)2%^bnT{ zO_>1zhpC5ekI{q-tDgZV#bx}IxhdvtDTuUFYB+YRNA$!V3~JpV#Y>(VM-&#EApy^26xgkXtRLPOC{!S|xIa8ok#_Cy z_D-+B!L`|f9Mv~rDT4#mB-T!-M+M9**vEx0ehCY^l=?*Psp5J;Aa~O|$`5TI^llu| zxgx3)?7H&PBHFW#d&U?yo(!P3N3TJjF-6KzvVEJn8Tsm`>ZlRFTGWPU!>A3&*F=fT zE~%s-|8`d0-0WAq3+Q9@EWmA4u9XU;6}@%K%xqp3mHpDLS;9<8Tt_c)I%&$sT=|*a zd?V&BcM*~Gs~a}>MhLhEp%N zK|dZhmlwADsMG?Eb*#xNj5Jh&xizJ2FGm*{6xLZ1%ZqslcgEn+;b(kpIw ztI#Omq=u!Vq36J6dsM%(%U-nOOp~@&jEqZ_9-YVo(;ikE3B8SAMZT7^T&UHOEUR@y z^Y@||FOlm4@`L3V2%S3FE!u1~pc9o(9KjE8WaoA>2Z@AZs!)V9R*fStWg}wKET2VS zfl`duu~B&xVqnJY|2t0(Gax#e$c1pnVc0sLv~zNKyE_bhwR~$AXtr|R&EWViQQ~~^ 
zQa%7ZnF7|vgugfOv5fi_OT&TeuA93Xh(1+WUfhPT*8z@8eOa+_N4U!5itF3#6MVHW zqSLSUqp6IT_9)hFp()qb>ybuuZ=rT1h#?gP*Dr$(q_vwvD2SV2Iva6Fxpfk^i}IpM zvmB>CZhhLjv|xzLhBU}*cP&tuhSrC9@Qh;-TCx4OZ|Nq{ptN6aYFFFo)UY&jhdqLu zO|YRN(B>S1547-0bFs|@jU*6cDlHi)WhtL8OULm>3(FIuX9cozXkQXtRv zf`$a)oEo~)GY1YGy6I?Gw#k7=&*$-9xS`>?QMs8%GyOG&%BuJ{Emlw=9xiEFTzvNr z)?XZfXq0?#@?#exVC$%;k^s8n&41^|>L%ip%}^XQ^rn24e+^uf6) zvVT7ZVq-hhPLJSfN$ccl`>44+plqp#z<;XGpPY`~b4>9yJNxPGGHYal8(WqR^qBCJ}ztavL2B)WS9vWGuY%`w5ujCx(H;xVCh;(|SK{jJY_@afPS#17%=w;?uk5Y0PRgU8u~E?*(&u0$A|*>E}U&kU~vJ{@o(^FDqh@d-GLQ* zmk*)~1xj$N{{&6UGTx_KX#QTb-}?z20H+NzQyGpAqB@pIa|m_|-I^)}rEMyVVfElr zfWzYm?goFF`>+t5(Zj{H@TuAyZZ0h8KP-=wixhcHDeeuH>*I@UtF&ZOAur@7?)|br z#f`R5tSq#Wm29Q0dLxuw-I>8x*`s@PXQA#3u@;}cgdK4}A#Eu^)ZZ6<&%s2|ZtB!) zh!k6AZnMXZqQA8IN(YXk{joLI=WJ*PCGOq7JNr0jB4Ukx*(lqplI2JP1VtNt-&+q% zC+@9_9Iu$Az*9gp;U%?wAku|iJh$guWat@`x9Su)ld3K*%F!Vy$+GV&QeJs)))m77 z*QtmxWsz<4*?%?4JAfz77<2%M>SZkayy?0#PdPF&AlX?h!n5f_{dJrdyCp+g2~n`3 z@YZg!olnX{i8vyJYcnxh$*f<1G(QYQFGeY56{}#Rv6|Vr;WbMEfRcyv_4G!QE>Qh> z>FU_*QEWIN)?P(t=Nzap$h~*&iAnVMdWd#)Urteat0WB{CEZS5422@QL$Vv?QOe41 zEit+jT6S9lIacOS5$~rz1>m?8lSG~611+U@%<0i0_z7-S_f=)dEwEDoDu?#`yaNad zm=7Tok?oCwo^z;f+V-hTg<9oRKQGd$9z2bw8LZjI`%Ct!PDyWoe~X z@!YmLbm9n==!dQ@CGQN78lV|iP2nOFAHeZT?2>0p3{zaPV&U~4MbYC`91>Jw9F38z%yP1~x%VuzkOT5WjjLM1c z?Fq240gBs|Fv&}cUW9QQI0@)8k>7u<}tMJZoBs|`?1}N)YWAq*n(x{q*>BKHqmL@ zsVpX*Ju}1}qK$&}M{lFS?Ub%_|Si%C(deKB- zo-Yg+fQObn@q9WVTqOXn98kF-ut_u+BsF8M-Y?z-AEI>I%NT(y?u>OpI)nAfrZTS+ z2=Kv<;YU|7FKKL{#&of}`Fdpbn>J7fju;$rYR<9oRj-0c7z9Q(N zzYd~je9T-4H%ZIWmu0>(7APqUD&&R}PHGPHRzS#|OBGmw9BU>Zw z49lyikz02bb1LN$nv{4LriLy)_|i!B*=KnE7XX()Xurk+!>TKs*^&AZ+L+3saYh6T zSZRw_!&AZ|BjU0g?3DR5l^Eu)Xu64E_6n9FOza^~-Gt$WlE`vQ3j3-ncK${?y$;|v zVdJH-Cch<1SY4Y!(6`lrzkTBD6>Yt(rue$;$`|@Z7_s1+)}j1)MLu%m7e!Dhvp`l@ zqoSK)iv3VCLv9UYA&4j;-KLvuprZWKJy^6W)n?m@xeLOW1s`%OIPb`pFK39Q`NLBg|Vgp#J_ z62G1lyGUm{$vgzZ)m9914EJg_eTNTjxSZ&uzhmrbj^yYYn-eNNjeVWw<`+Oblehqb zuRPI-<&3;9&k;!rwxA>TjN3Uree7_bkH9!Dv{AhrIjbDcyddoHj=?p3jmFx*qyvuv 
z<-I;+qmw);(^c^)IaDP5tjb85+0Oj_^#mFT^8G#nAqa(nIu4Ek&s`MsE|x$+Ac_J;FfY$890csP zz`%iY6dDQa!h=F5f&;beuv5Bk@#+j&9*aHVHXoPFs5bUOhC$F};E7-&$G4Ta) z-se)X>U=d{1K>Z!cf==={06ao$PbK9w9gYeK>X9=6ZPR@2ZVi-d?LSS>;Ui&luy*R zj$K;Bm&+&mcaR-0`BC$UTnPRL=bk5vqyS#O?M?O){1EMAoZy#eC;JJ0igt34;J0We zPZRtY?c^}QuhCA9{?J?@8(iXIYS}S-yq%ZVRTAa%6+}&+a4-BMdh&$bHhMR9YleWM zWpRt&38irXAfsY^Z$;iBW;xX*h)LBXpxrzh|Jdi+a- z--gx5GJt+~`{B(gt2hkwpTE67IsMDq_cqYEjFuE6shxu^2>J5(w{}QWcLZ#QJUM<3 zOn0drRsYS~E|93;u7JE8k(PIXWQy*GbX4ibcPIaSe>Li%3+bJSxKnzR8I?VAs{@62 z^~=`v@$c@iZ`EY_T#U`eEj{)dq;!bzVR${EsQa z46$_`_r%R!e=A&UI8_;__`RO~xgHkjtFLd^vp3H7)w3KJ%NG1adX!qvZMa{5et-G} zdV)zDO;0r68&nl$MqrvAOJNZp}?|Xjx0#t%cI(o+-JEB_g#gj~)+( zd*l6sr-w(+zI*=IA0@c!1hAJTL=(US)jLZBZv{X$aYL&Kl}F#5n~{P1#+dj<`OFpk z?b%tZ@A#}*OYUz)O}5;z9E|B#Wjkq2NcM7GUR|E+!=+{B{U(<AidR;TMQ59Fjby zxye7~#chsm2ukCIV2DM1DaV~f(ur|e7UP^h47*s*v(mAPKiPI+@`+^x=+xqqn4Hqj zXagE(e=T0%VD7kYuLuQ;vrQ=oNSFj=o0_+reKbNkL+So z`LeVQg$v`A-=698$j?|qw_kuSTh8ulQ#Z~L2k+f^1mgwq#vhW)ph`DI zQqCJ-cz`OcDiLX@xFsYR0uL?n3OKoSS%9;I6Eo(rgh80?w5LuX1Ms;35fn}vfi0cJLN1zx|P)xJ~<;h03d0si_$A{14OBCMWY9PC3Yl7*MYrw_ma?&67% z7%Knu_?HhSQ^Y$wef2toyespFrvvlC61QUh7OIBAcx=;REXzcnoD zMYCMFaX$^XhD4=j+~|y}gB@gTJ1AxX`{J=R9kz0{2|%<$*q{-js@)ujlN;0zk{r#T z8}`F-{Tn16TI8ZMJ**kpDub`98V{u#+Y8|U8_uh zgSi;B1!?+izyEI>CrU?#W5w-w|1uT5->?*qFKsiY8&zqu^UL{iDOW*N{+SoAIAa}n zXGg3Dz+}ES$4xMHqfgmtQdiTblj;`!Km-0SP)h>@6aWAK2mr;Br9dAmdxKsq004q- z000sI003lRbY*pPX>2cYa&>HFE^v8c&3y@!q*ry`{ghLXiEM6JySj1Rb5qoYpS73fJPx;BeNJ|Fa+E9Ku$2n9C7TJ#fcLy zVTn&pAmA8c%V0YJgB{0Fz=;i3?swmNfBn_n5=!Xgc&g^r+n4+9yYJrn?tAUL(3UyU za_2p>r2lpC|2z19^*=a7KG8J07EN2^+ca&H?~w1(f7?6yB;LQDWh5Il- zz5by1hNySI-(n&M{H@~M2JiMp?2bn4&PHsm;oa59u)7hvrxCli5v#helf5?>A{h!= zDaa7~Sqj?FMJ{r~HvWN_e;@pF4n59*Yp3ZBn(oA?0(RSJfPV&pjy1m<$8qyHX}(LE z?~>-brJ4QYA23Z0$NUGie^Z<9k>-1(`Cc@@!}Sl5f0+CuB4lA1D6=`6tOg zMgC1R*GAKQCiBhYkCK0y{4?a=LjJAfzl!{`)n&+737IVrd zbO-fL571-n^jHTy#^cDM#@m8!_0x+cZo?3}4TRFj)0nKC@ITIYIY`q(G`)qUx6<@B zn%+*+J1E#m`JA}B$UjHHZkpqnzmuLJxYaQjj(e% zm%C@V8s?uYA*6PNB~Kcc6` 
z$M0t^Y8g9sUPs>h;6uoZIxd`_c<9o&4or^U_29%92W+WDel9&EKU3o>bg%r}?a9yN zIrZ~U!Q!97PB@4tY=`5(apCNMgDw=%dI5bbpuIvGP6iGJU&zAA!TD1jdjXWY@F(zn z6V6!#AA_?3=Y4SgIh?P;`5$n)xgUiSa2|wHg!3vSOv92InhqUVzhue8=HT zz*&TIniB+?*Ms1ipI?dg5)AN8pc^U2b{h66Uu@KJ8+>DA4K~|POX=~QjP;d++4v`u0JP9y*;GaZT#o9>b?O;-%0C-MF z@1?(CT3VZ3$XBZ~Gct~0RPXg}`iN%p3kx$dz>I2AM$}=sb~K)Z8P7O7D@XKrxaSy^ zLrg97h%C4qQ#nk$!X~GR-Q&gm^vZl*sw@W$5J>(KQ?<$_z#%Dc2+A`v=TJ>npx{`0trn}zN?5a&hDFN2;ut&3 zI4M{R)h!K!>K1@ulDVo?6E$a3znWlR?1Tr`1L5${iq{4Q1NJ)LbfR*!vadS4@34PR z)>~CkwyL6RRYloU=-#TLY*j_s)Xzf-N)6O+01X4FZW`iLf~CsL%=yMrXyb+8+7rVF z={&{!VlXo!%XMZ(Z802F$X#71g^b)XyA!2i;Ps)S18}y%+0lgMKUR+g8YfW!dXluV zb~c$#rdy;ZlIe+jHCRtCnO16g83?rkjdVcWKoMP`nxr{xO)OnaEH%|^k%CDnm==FZ z{2B4Lh`$w-6&;BdT|`bFQADJ23z!_xOqoO`9XC|a5j1m6FhPvV@T7>BpC1)5kvb+# zF_Hs-s1-1qpS>E0H@wQ^*AIc6OdLwH49*$;xfZrY`BWIIaHbFj6*a>u+YfD?AN-=b9kjfLDvs?5+u-tt$DQtbeE65m$b`4H8UC^g@Ldgjw8!UCIO5>~X9dK;X< z&}1mw4iAMHp32*WTX3oJp!R@WBm_f4!5AlQn4$@Q9SEcNjx;S@K?(RXwDcq~RhrN0 znwFjxRC8td7MBr=$`e%i%SO$NO)H0~wwHJ=Rz;yQphZVuKBHwjkc%TEE zMo9C6LTF2+g{t6HLFJc{VZIXTlBFd5G~Biyb*Qn)kLb5iPWIgYXk8*tbRd2#~QGD8R;^H zAvNNLpxXU$vK=v8*OD|@JBq;A<^4j^1oJ?mP6=bG!;F2~F}BI_cACPkCgN_>G^tgI z%}lZ8!hMe$&};%oWJloLu^8qV7iMODhN5kY3Af$GP;mKwOM?lGwyi_a`1J<0hiYAF z;7Jhmaz9NKO%^je+fW%HI%`o5NkM{tU^}3wA{EP9NkI>Yr+bnc7 zrd$A%i~s`=`Jt?WLUC5sg<6_G{vvNedr^1qK^33X+L9 zBe6?`&w$8(4 zs(F(?DQrg>r1QD#pYu($FbrO{-g(JZ2(R+F(- zlcGdavTQrnkKXtGo|q+@?=nJgHx&<2*dy`?vz)z>XPdR^n$#?k=KFMTHD&6#!_x9` zKNWvfYDmRuc)ztOVhnQ<)i+o?FC288YZjH3EtdM(Q?$vpvX!5(!U2f?ToimFA>*87 zDt?T@L0z$msht;U)X&u5cB*rKx=kzJlvcWREB~pvl}|RcQvRgH->KCsEp4?|GvcPj zZIjU{f2;OapR&P>4+&Rvuf5tr)w4F{Yzx&}q{r;vO7%3ARnBda^L6&x`0Wz^l*IQ5 z&)kM2)WCJV)7J3ZLB-<&&z2ZGpF_JEE}Wr5;L>8?!rM@Ws$VuW>@+oGVl}*gk?IHLxCe)*@a(1H$E4bHtlHmmIEZ=W?dMnF*G=rjxvMH(9(Eh?dvF-5YkM36 zgZ?MYI5J3euDVC+;=SSb9rojp>-U`-rsPZH=9XPo=XTv2rhUd4ga57`***j9Y-|L1 zmxH=@`wX+_7>Q?c2qfgs!Jj^)00Y%0xSE=4BqwkG% zb~b@Ux}W9`>3FMrCc!@G*h-?&v3E3O#H!GFIrcy!Ru!GIP4X0=DFtts5$Tu%Cgy<7 zQ2lnIF_1o;r~1Q01A^>nHB$?_YdT&Je4&f~{Gp 
z2H{evnp<41hPfw#oMydpCvy3spF1@=dL&n>M6n|rAqgWVkKlOtg~bj9G-JV(GfpoGmTnX9ExYl(EcJN{gk6Unu4mMB5RT zOh`1Fekcs7_mJUFXH+MY4tlN9T{W`D6zT;>_ojj52AHmM378&Hyvj2buqLjVCQ@If?I zrAjy~Onjt(lF3BNdd{VEG_S~k*%97eKrJ&Xnar#xIU@$-7|_xZ)F9WTG<;P+LoXc# zkWWN(frHMafnZw2C`CgnQx3#9Z-#RnQyXH0Pr|8fj`)Gq^`B9Ji&dKJWC3c2)opFH zyG4DQmd}v2RYKkT$%<|b{)1MB%^$0ieLb9iP=u>&^y#*fk`K8WJ`upg-)*Puq^?&= zpl181b=1zrayQbU&%^P{pgw|W$h{#Gu2YU z^>~i-0mr8^M&xHni0SF$Y3RjRP^rtg28+}zQ?LNeOm97{k2&hX&EepqRiF3Ng+ZH7 zfVowN8})YaaKR4oaPdy@aQ$7Hw1bnEEu}8!{_|OI73w&mt~TvVp(^ z87=h?tq#$Aw@6ST8d-&uMD<`(8-=fr!hYo&z=kHiAquxdzO9jOTjbl$z6=f3w}4yb z9N128mUEjORNqR)Prwcad3Q$Tb5Z$Sjq=+}dEPPKY^fo^P9JRh;t zXyHldkvH;empos#6d>S9XrOQ&4*KoVIR?izQA~-{c*8*?4zadN=G|1^LABp(3HLCY zomt(YVTWTXF}kboqJv6kc z1~TC=Pf~fUBLf0TgBx+EX zJS;7a=#p!az!vR9XU;N&8Gz-(Hh-s>v#I>GySf!FRA_st_5#?QvCH43I)ibaIpO&g zCp?B=$ZXwKIWn>7ab9%$sm^>W z(LJbS^#J=0(p0gFTyT#Ws_bwr%PK$RJQAJ+zs4LEMtUY2<#Vhow-H$ zIV316A)b||kj4Nw4U9fa4_m7{sPY|`U5-4%*v-D`x0)*`f5$C9h3{1H_o()TCIEeg z0E9gTKrgy4IpW&e0j#mhZzcxT#>3}vTuU^@!Ty&V$BD!SB;xj^kfa)EPl&DCH+##FFaCh%9a5*60)qLh()^d2PQQlzt6L zg~=3))2k^d`bWuCQ7~b7`Yz<`!GTt_FZl`T9XJi`{uWZ1kh(UJ<9K7kB0WOIKZl}( zHwCn^FNH!FywwUL3adF2X(hR113e zw<*p5Ln{~3R4)exVm(brPd_R>{csDaLQfOY(*z2v?dft0dYX`iM_ztVD(m_|sU4-f z++s%e)|STTewbphFI)Y9^kqOtT`d1F#ovtveA;Y@Jm9}dW56%8a4J+yF7!;jY8br( z92pGLk_Zb+P>Y7XEGoHm>yuXMw4QpOuC5eJgww&7W!Ol>(9{JD(spHatVLctWBXp`A^bB3d$WxtLPy*aaX&2QX(d4FbV zEj1`VmnnbW;;%YngY{I*fIwLqH;j>CCs%;nRQP$8ft0238((8lp|hQRI6^G6MUn@3@G7#GNUN6-4`U$@472w-I5o+((C%-T_1L z3r{YVd=JKm-WZ&RjhqM>&wG@#Is^rc0t6M$mKL8V6ayusuaZ8-SH|<_9)T&nmnUu7 zA+oMmAPpaHlB`jwcNI}UtTjTZ(r%bRKn$xr4OS z9cV?vTw#G{EAYL@AfDxMF{OEXtf)~FpaCPVdvAdAVK{f2taF7LVBx5_lV6@d86!GS zP{Z5=i5{ij0oC36#Ma}GleS*w!?JLv4ci#==@Ka%Fl3ViI-C}{%A#ly=RP=3&rOiAW=g+$hWl@LF z!?bxP%qaY4r~nGzBu_?%bcVt45$_$OlpOL8TZJ4k1Ha?A z2LNp`L%TC+6kK&DW&-C>Wt+>hAIeGpsu(5?U~F<+AH{0BM@bPY#`%rUOUz@N`kw4IXYM0;1P~r;my!Ag<{$ z1E$R2hGm9y8JHDsq3TX^jK7V_36W>S+h$l-7YPO&*vGsJ1pD5fFAa-~}a>aA4!L*3P}%U=@h 
zVpfDuW|;oif(Y0uGjp0#ieeeum^cr@sr=j8Wl{NS_2>WMI65OsDtQ_PH zDL`O;J^NzWv-vy4J(}`uJv^}M!wx#oO6$AxZ`X7OBQdZ9EQX!hy+2N;zQvvTmd)R( z^01=}N3bfArgJH(eBFs-@Zef3?vUU8o?A zGH?_kbW`~o4)R3k6Hnu*;l3(+kOnx1Tv#pd6p;r&>A~bR0j2x{XR18pGUhAW`1u@F zKA%vQIrF>Z%GOd!&tB{#{vM zy`oR=Q%aaVs(oCKdhGHqN_wJsiOnQ%44XBY6@`>d7?e;GAvk#q2lZ=LRGXn5s=y$W z4=5p+*o!!2vGWl$WtMAYD52-+fSRYF1b?T>|BIpZ`GlUUVX7QSZb-2XQg}#I!$Qm> znmnIoqSH^ceY*JADv~C493^CmjTw%D z3t}{Q*z9!PXW-<`#$2c_1o@@dKK^jTMo*h#eltKA)2{)Mq~ zkD5qq@p8I35s&?lDLloI5(5T6mQxcCf>Bk$QpD8> zXCZ!|8eX1vAaMQ}`2b(Sl~@V6K+F2wjL#fh2fTTCj1R$G_waebdWgrr4;TCpSNol>)+Ha#b9CjcosQS9CZ7eWs=k|7R-U0sYv@pSYy&+dqlz$Ev5Nt9EA7z&>_=Yv5*wAL|5dnS^VE!&Z+R?X6sbM$m znm$8)!c^jl>1uCuBTVVS2i4EOE=_EcV76IG)`~fsX;P=La+clxZ25LEzgHPzBe$ZV z?lvQr)hvLB87w!$K;3rHY5?y!gZrBRns;-#gcDt)>*Q$%s;mdmyVigR;OJF0<%&&+ zjv%9}n)AHNsQU>3*f_|jZ$*qwN`NCEItxv#EoxbH=FH(Cb?D5*AN%Nex>!C>{1u>z z9#?>>E+7dR7szIsA-fTY7K{_2iVW#{Nw|A|5RsWX46WG&p#L>C(=)+opdVQkxr&F>6S>L2ih$PJur_A zgsJUUf99^hy8-#=my7XQCmGnjtB+JXkC}8EoeB0dhHbtg*#7!Z5FY)~1JIEUcPP zykQf&UMdXP?BbI2JN9VJ)=lZBvPhlb`e$bDYw)cKT#c13i(KwI|IsEcB33x1RLT(#da>saF<4MFZUC4jKhFZ**)q`5fD+bj9h@d9X+K>1c z#E)zq@?VJa!AkgA|5^zejqELzro!^q+&zBYtlwBaH)(cI_)<)i`M>JlAFuD9de1>% z`{Q@=Pq^IFkK4&_Y|u$D(-&;mgkJtB36=lxdil+l?ByoG5@%zVW4%P%pk8Q1830>~ zsk0h=HBXe7_b4=#md7KoQfs&A70E2!*yTPsERyX=Wr1zsS#^qJ9!bHTdTGE+AqudLBDLNhcJeeXql}Z z5cArU$dC?I-ec8HqQ1&wU|Eq7T6&6w=Uv0l>Q>5!EVP-eRxKskBNys=tF@X`B5P8& z@UW$BLBc*dp>IT*`XX?ZEo(|&?#!%x7)}XetLk)f*kmtX<#KH#fy_kKlDBkWe0z>NeBF|1HO(5zT(Jxn{`Bv)SLZDIneI6 zhOBn}46wgd4#L|l)e8~j8=IX#qHzR`EJ9~w8Oti!)MIH(#u>v5CGDP`Y>Js!`6w$O z^T=bwQUdzIQsTiv#Zneh`bn)(b-#HuklW3pfjp))8pu;xqk%l6H5#;li5+Fmm5ZrE z>``@!J*tkeN7Xs@sCv70R6Slhs$Q=h#gTNMK9bgl)LFcmI^8C2JKiIm?hserFU6Ct zS$V27EAOjj@vLp67x-(LXyI$4TSOZfMafF>qUVd`l{tCZQl6TVr!D2ZIZ^7% zqjU1UrMx^Rc4hMXoXCFWZ3%gz6e3GV`l%-+{;4HmN4Aj&nTtNEh85a2S{+tw&Ijq*swhJ|=VoxM<`fa?a8 z3PrBhFUWS9aode?i>xaaQb&fPOJCKcg+eHB@V-M$a{PDU`~{r9g!5nFoZpo779YHYhRpQg{DkIpaFk%wm2i_PECequGRkWMc!l+Z%t) 
z2R0Z)$_%2U(6TzD?OYZtS|rM=N5NKZ78YD7DvXlL7Yz1Y6Li&^O}NRL@4zUNKARm%hd>Z_&mg z%*_nJ0waYt0xy=25r3ytd~Ru4o-u0C`F7%-SqoKGp>b8O9q1PqM2Dze5`&UIO+!N} zU6hLbT%qV^8HW{kT>5 zgU9y=E9xm|YQkgvie52jw9x2fjOKjTl%^yUneQkFvQE_C(5F$ z`c(^a({ARbTipxk6d!{NxjN+z3%9e!u5Cqvn0xieq;|jpY5?o|2lDC<>=L`wRxxLx zWT%u|uxp1@$=&85m@c7oqc&>kj%pdT`8$OdQ`T10BNcsgeHVJ8!f)h~?dwbSsoP60 z)rC*5>B6UN{!XoStglF2+q>wj>%u@(*mkCPzptK@7k&n5p^X-^l+UUcezLfX%vCny zfL-Tub21G>0JhXKXJMbM{-gu+%Nq1^5yQZN+b(VgRsO=|inEpHB;i96_1uQdcaTCv zyph!_hl*MCKuDLo0@JB4jH81apsFK)_?$C^gWNXt(ny!QG}0wVwMA`4!(s*3i)XO( zc>C6lS1}UqRGHi<^rAi9i|$v_iwmywqT@&RqUYs%@j>pzTU_u+m2bOvoRD4VyHa<_ zdh1oMEN8U&-H`5UziYPC|FzyS8~;`QYc;#saW=^oDz#IOPjQb5?}VZ zTh+esRP*LwTM)Y~edeL+T&k3?1FgtDSRIs2uFAVo2`}a4q%v%Gi?`;~PC9n((s&gD z_QU5Mcxe1K1V=8Nzc9YOD3VN3bZq<)*@cJJYL}HH@(($^K zIyL%UW0!7ZG{d_!C!FQ5aDsAmDrFf7m!Z0(c4Ta_BJg5=tqO;!4yRNe*}Cz@w91Hw zJgWFR4qn#M3}{32@&2bw4Q;Hw8t#wS((Tbi9Wl)3qo;^&&{t?&4Ou<@QVm#}9Hm?hN&FT$2Bjh+|-%v{)&MmuIa=(kG&Pj1KUxvz;SaQk zJmviroc|6)h%0H%Ph1-Jm?AVu-giC7vUgLqJ0G1KzjX1zN%f9?6e)iyC+uDy$D-02 z0SdPQ5!+nr__;}Le@!H0t%(Qak8d<0@ke*oM|eE^JsaropAf=W6XOoU&;gcOu zzdBSHK4n()%#2@}jsC1femT5as&G)fZ@r-Y7Sha&cRL0tHZZW1_V&TKvr&iq-4*W& zV*d>sxupBANlWaa1G;eX7WI4Txc)ui;oVdZ7P8*w;ruq7FTnX-IKT4>aa3jj)+IuO zl^yRB^TDio<4T>4{OCVc;{6bgoK$1Z#Lh*Lam<2eZ{D;DeLt)Ig2?Ui%#=*8OkDJG zk*!`5+R7979`>EG?ugtG{_M1NAXm#SAb{P!W6bV(cmIp@?*8BIAMg3%{{~P?0|XQR z000O8w3S;pDN^Ew*Bk%<<$nMG4gdfEWMOn=b#!TLFLZh?aCz-L{d3#4lE3S(z$ufl zXiM^E(>}M#TpByA=aRT%JMX=V>yarDwpdf7j-)KhH}}8a?gAhI5Tv9u?Y+(%r_)g6 zVzJm=>=!_|-EOzuCbMN4-cNGY8GGztZ||@N|NEA)pXOQaKPK#B$ftbCQ})BXc=GSD zeA173{<_t=nuHmfCc!-7EV;YmX~y_d&f_dh;*8}9%O{*c%Q*1UfX$ZqB#GHU|8S=j z_&LwRDVMGLtu_!e9QtzrNryu=VC^-jz-Cc6hEBO?_siuw_Fg<_w_4$JmZUjT{i)ws zp3cX)`enRXX6iP{TCJ2vejYyZfJeCx=szYgZ_WHP1A^qQG{y1DA2^68B1*2?`SUy< zx4R5(fIS({#eI~F{Yc#2g%KBbsUP3NZHtu`JOw(%>#%6D2;<_Wz;w^^{JDM8YUSzj zm~h;s$&`jY&%>xs&`b>%e{@-ip~auZdRAO;3;bqqG+U%vmLGl|8;SRYs9128; zB$$uI%uPnW@p0ZSpOsRVGHnq2a2Up6UJMF5=`zWbk~l#OzJ&hpAM1@`nM;i`K2G8w 
z8%U&^>70(f^~b1PIkvB$dzv~`^v`6~z*M(CU&~%*UG_2~UF*~+TemU>>$)wOce@f; zA3s>IWw!m@tE0WW9pkyw7H=~>C&OB&=EE!x*3!^k@%$M~vqz1?J0>meGD4qUvT_Jo zxB*%}%1f*~%6J_njodZo3eYHc7-l##jCY#Q1x(pj0b^2AY2l)$W(IyCCA~^VD@ogM zl1sHfsB4SD8*SEzhYmC^*grTM`|(!J<{3m^4-|yKoa23VmNN+F=4cB>5YQ|pJO*sz z5Pa}Ga74p_VcjH|r!3}vigBLL#uJQR5W&gi6?iS2*?p~v-dZ_%P!bxCprQkV=q~{w zlH8vraXz6t&{+BxJM3y`>Mm>?#`Bz0E1|q}yWso*kafH++v~F4zUOJ!bpFyTB$5hJ zAsU$qu9j`I)>pugaT0LmkCI2umLZRVjM#LSL{YLp4yHVtN7}s8+P{ANx{S@bWw2@h z;GD{z`^rdo=G#h0)xm(id(hqgrh9PM6_#X>ds2Dy=375roLp_Ki|$o-Zxy!7^|7r( z+-B$wzT#LnW}T)IKf|2D>EJ_(lTsFhlM199LGlvkkW?hGOBwXpK~q8tgISO1k+jFQ z8Mz*}$J$Y|2EYzqHA0CA18}dJpu}AAoHofZXLd|CRL-FF!V|E;7#+Bx6Y9)ulHd7Y zX)7U)ZI`vMMSEBu?C6fIXjwk2u;eP8^S^AkJKUOeGP|Ml;oLAY|CtxLIk`a z(c+Ci8l~a^uvXmMPxJOoO{}hsn8y}sGF z$45V!W6E$}>De>jKxYLgOnj6mQgvL>si^PIW;_l$Ahp5Ej3QStid$-Z>s1s=1iH;Z zE<n6w0Jv;;JmLmui8 z{D|L!3N6BXLV@?xzYoWlmZbhT$I5j3>T`J4#%v`FAOH*RmS#pXWq#JwcUQ*pM1a;b z3!ichsnTef1A!3b^Yjs5Q2p=bFn2f--Ow1`%+nsW7QQ262_%Bc8D~)v-*?#}4f7oE z!{B_a`HvcNc!N8E)_ngu8fH-$}&% z8Iwg6Qj5hT98WOq@Ie52yT)D-&5Gf)^VE;CkaU*9@V3>T-ZWe_NteGp|R4X=;stfgZRx@Ia$Kxz#0 zCbDVIgHTy&1@al!%fw6o_dA>CNO!48T60lUGHsINl*<7TWy2`M2(cokmqCQlbD`C2 zlgp5=M6rG3=}y+|6AhBCZ*;Zz$e(g#05=o>)Yj<>=D_oNV6e%Z^n1opKNn|?8NMZ$A$wbc@E+i5CPA|X*jYbDV8y^EE&V}F(@dUZ3b??2j-JMK}$

J-^sj{jmQ5wtl?{UwcoQV=Oh@T6E$^R)^b!#v7|c^@M$owIl&)d_VYD<%t9ESn_rC_oID(!!8gS@t^gfNgrd_(B_u&~=~? zrGlmxW{OaEBw6XZWjI9~YB(;<)!|H#{6{24x29HfGS8eFWvx*_nwAOW5Ml$`k{!3D7a8boE2nWu`{V;xyn$$?`aJS2Z#>6;`Mwz2-K(1csx(D@1^F7*G%gO zbTkJOU4~p*TO+KLdOe#(A^Pb%P!U3asH7RTB9elCT4*36ifTZir&&vhPIqMqEhhzC zk@P1E-Req#nfgP-*E;ErY_omftzm8rEmAGNtHfZoB9&3RnFeUB%7maR^gzaoWLr&R z_q~}+F3FfETOyRkip?8bsvTCd1rnaYL=5jj(Z3?uwiy_HV=glW91rxUD-5p4LjzjY z(~YraYLu@hI8XwJzJdsK;Ft5uvw!xod>L`4jpFfRm?kl6n z+0CH|BXYW~RBJs=D4Uw9@DA5fqGi{i4kBSJv(3MTDrB8wb1qEOrDNN+ZQHhO+qP|= z*tVTJwrx8nI&tQinh$Ty%s=Q4yLxr?UTa-IT^}4pm^!$TH?%10kehVAvPb;8Uv(M% zM)mamG$kC;hU9+@iToE=(tch!c>f!AmV$!=iot$l497{*=YYX(INI7$isN+6Ju^7nM^sxO|yV6@8K~dY$#oE%xvB_S>OKb zoyWh#g#PdCMNYP7x@?@)mVVQn&UWfM4}^ccyStjc+iO##PQd??Cd#Ph12pMarNYwx z*$h~W1Yj&S$LQ2EGqF0TJDOjF{OZY8;U5ot^LAd_eSfrebf`}(_c$A2LMXN<-mqW5 zDyaDYrHqSg=f^TJ>$07$$klLXGB&&B#Nq#E<_qsWG6Q!nL+ zg_jFGsAc@Xa?!@ACB{lEmA~K^?0LPeEc?u!w?I*3`poZiU-Fz!?XIWB2SgCX0{;IH^W%u)g^QK@?>n1hzLopqb-6@X=lesP0o=0DD zkdYz7r{ei{Ph3@)%^!O2+b4zj_^(_Wj`?8$ z#7mkSgxUnI`<}T~Tg->wS)XZ=?e={$e4nf1CXu9cHMLH+58Wka91A6BdXahUYw0VB3wLBm z)ho7enhZ(yO(a&E@Ehc@&+mq~E#u_ufg?_$Og?SXd%0Kmw?QiP~Bi z790zp(NOPk=&24J%BT15j?qYiF zZvFQ~C>#oMN|O`KW4TfTIy+k)6O~0E=(Q~#y&-24gYV_F1{mNYAZB0Z#sx=KQt>n~ zTjB227Sr>p1ru)A79ZI5uih`#+_PShT8I&)+nwXCN2F}k|C*(>yFGb5s=m31kWtwd zKIL!k#`83b8;4py*|?y^5d(XD3$zT|B%$; zB(9WWab87}(&hTY*(z#B?&TEG2aeBII{2;>?$;S|7tfu_M6j$jZz z;EM_~locDpYIYGB>gMEoKIkOs2on#is5IHA%@i;S!WIBrqPg`&ehaIT+d^+?K&AhE zj-7kao2y(-Sln>K3Y{Ni(^|uIUSPe)uuuN9{pb9)~>d z;I4^=lfK2v{!6QWI?1En?~|;O|B8M$vZ|}lK8&Wneh{HVoXTIEK=x2jHyT400!X(C z=PLmj2_u@g#CH37Tt;~c+WPdX)u&F`Z$9|>73BZkEy#C}OEx}RJBUL+ zTM6j30n^SQX+YjS@GPqjrHpY@_}7!;j+E*vROdmnn=V=c&I@0;&<8?B)RQ%mR#=vv z@Rhaj1qk5-etiQic1wVG=*?J8KeHeT)@;#sgiiKfeDI7+M?>}7l=M3D4^WI`kFX!;8g>X9>#Mh-A zU1k^Is_8;Q`xwaJaPT7{`V{@(47@WNqZ7xfkaQC}f5NgIw`t%;oj$bZzb|JD!_-R~ z0xbEngohE~g)3(6 z=!hoe9>DfA`d#%8!*^FUD!B<}fmA8SrqV2O)gFUbd{mHpu)mEDEBkj=$c zDiVqHTyq4s{e_j<1Q6_{@-P&?s~UtWf}l$LCwdhMXiz;U9h$-#Zr2Q~*WXqBue3@p 
zv=JWLqCH`9-y_=}#?|FuYt1?gxLsvRMxAQK+B^?m?F7YKEXv;&2Pw?V z^Q1ESRvp(`U{^0)h{g)>!W93}%NwUjKEqA53y7N_?!d;^3DKz4+0u{7#B(g=U9l9!;zv~o!I`OoL|+IY&h9GJ@s$LzkDRq{}YZa_K;JZlzz zy$b2dmN0tI7W*1Aa8LQ4nw;WrUZD3p#!d;|kuJ5W5&{3wCc6ansT*E@s^EK}=D%x^ zor5Q9^-~%pUR$>g!~oYlL!w{qkr=+PkaHwbs!FZ%05m*)(ej4aRYTi3#v*X=U)uJW z*m+#1TWT*%Mkl_rj%kd$6Ibv&z?FMNpni1}&Te?Nvs1Q^3?{fOU^w=eZ4QD=Vs6_-yh-YFLL_sN>O}A`MGmx&Z&m!{8I3lsF5~-cku#D5uqJ8$1?tN;i$T__J{LCkE&5n$ z+2Ird^>82raK3CS2aMfB>PDa}9u=APt=~Lel`d5mMk}S_ypDDYJ`_qKzGMTd3J@SZ z02R1@nUg;9s4=N+B8bK!An2MBFv)(tgo3%>=(1rkIM^7Ih2axat+)I3c>U9$Qw2q` zPs8-h!)Pte2@+-T7}bfd<8MASSR)$)B)B0hjnz?^D5vmEfLl5kCl6)0)Ju_HsO81Z zHiIkNdV5(GMP1@W%Hatc`HZ%yshFyz>Tvwk9;n{d-{CPoT)Gu;r?Er|cYB=xTh(TS zmnnL+sZAHT5r$}XEF<#RZ!ueQwr6AfWG60lS0VG8e6K3og}>n9IPKBESS9Dh*K(6s zf!1D9Vn&BIKELN4UL`2=FST{edlo&^*_^&%ACv7U3+>&lwTO(e`F4Ks!fpGI$u~Hr zU1fhbYgd!nN$g4=&$M8-Xu-)SeG`s8`_v2j4wtw_jj#K z9zg@OyYW+;dYEDome|&-rs;J3PA7~qnM<-ya~ep&B8HonJO19YsxBkFOpPjh^L(+l zdwzu0bSW36v6858lKq-T7(a-lqoLiLgq1$z`o$c&T)2s#b2eMFxBUqvjqnPJ5E2iz zsVg}u@MRjDWQb^7*ycHsCFv)T_56sJL&U&Q9IyKo83!88$|g@lmA(3-5O{@@0Eddv zz9ToeRH3c5Y>{IN8lE`}=LrZcSqyj!^;YtaA4C#9r#Z+(289h|^CrFP56oUwi_ zt;ZN4&-q7_^5N>~y~i`NLi3(sPS8$@{BGelv(y&xX3ciSjV^Dq?!r-8q}sg~ zOrT^JBk3iiM_@xBT|Oto1nb72qzPATmb8YA%vBgh2O@ zHG+)|h*xMFBWvIjS16JJ9?&aGNkYQk^6zHSXHD^s7rl;R**-7wSmIKKT&I((FT-F$ za`qI)Tl~@Z^qE4WkAF=^$GM2KCcA>M@KTc$bV*Ht{%0l#FSqZJh!upna6{`eBU8au zY?zYfY0jFaoaF8rnS)Xbg0c!o?~X*|)ls#iq%0|#1%mp$lCf_;v#09!Bkz-&JuRv- z-RW;Qb|=5VUvlWEr(iTvb(zZDFcMcTXbCH2DbSycLw>vR3|cSx7T?+@=bb}Y2v&w+ ze-DH4CRWcqE$lL4;`^W#0E-ept`kK^%SKc};+^C&*~Wj&h!#dn5s{myTo+=XK* zylg~d*S`nl>QgG$152awHRPDYM8dS~i)exNYUf=uB}kPg+UB33Y#h)Iv}I3F4Z@n+ z>GBA_Lz(ircrf~yRS>FyqaZIQTFZz$`+{*?0YlNKq6n+n@3ex?&yw1bZ7y=C*NTip zYUmI=47}tX-INN#yM;tSk08C*82VlMNJtL;f3}3$x;glV$Fx6oD$ipO&=~Wr8Iv~m zptVI?KSDdt&j?3uZ6qoqP$Qyi5DZkxe~~#+rDzH_JpLSx4I)tGpRc%o@CJ>4Frz|z zDsAKKqiAa105x}Mt)h1r&1PRm=*ouYuCCX0-yzm$R@Sd2chhqi*f-VPtvS_rq}c7@ z8YqlXo_*I`vmWZG-dz==Ab6GKB(K72<8rWhTokZ#a2LayCKeOumf(&VE@}uU*lQ5& 
zt1~t%wXNFN*bj6HLRUmyKHXf0Gl4w=Bj78Vujrg+vOU^a{e{<1b(6tw|M!ZBkl642 za_bpr8)a@CC!aza6b9DBqRuGnUbhi((QN}NKCw!I7(IpEd5RNawaoC$>7|5>PLcJO3$>Pi&1322kT*N`*i`;j{`b9w+`77g~kl zSRs*p2kiSgT4b1^LFyDGbn^N<&E5Oo`|iK*mG|qwxFovc>+eSG$h>K~FXM`}l6 zeP#p15!aL`)V z4(*S)gTE>)B7iLP7EsaQK;JqGIcbOrKf_u=T|Y~9{$yF{`4iD>gkO$>hbj*_5RGom zc8MzA5f~%JYU^a*Dr-n@@S^bF#KF9RR8;oRPIXIjX*W5wG!dhKwjPjjx#Vu!VWhJgG=fsMqc+YtcS^|RX_Yh^H-F+&qGB2Cj z3ym6ry!}JVrZYWo-}rc4>FrAMUmvJ;>ceJQtPg5YIxt!Ip6G zxGoC2&nXns6nfNgW}F8Ue<_CHP9romhHWZ#8?l zCEMUc4Aq^KH7yK0`j{G2n^Dn}N*<=DZn=z$NMT94Hxd^6*)Bdk4nD~E@rHO+jKrI$ z;s2{hmrOpmjS_>f4HLsiiwCwh{(=C!&~xJ_c^^n{vj zL+Nf4JxmX6jpMcv^U8qQi*=C8f5Ziu1>qsTYD~}&9&y13Dr2f6+W9vkL<~@TTtMA9 z`CAovI#Cxcfs(AL?e^*jtfT|#mJ5&{K`J7y&oQ~w__jG}3!q&ByjZ<3Dg-M5kgXv;Mjc&ASv>u?%|FJdUIuAn`ZN{9gy#L=BqLg$`S1;sV#tEDu;mC43dQa$60S6p?1e>#Lmmp`cyHfwi9(QHm{y|+If0YU%6+Ysm#E@#52>lK2t=Z`M zn7;N-HG(O5p@Y3C11G2!?vcWpkOKzmyI&MPP`l2@k1K!aKX5-CT?mQRB!u3s3P-Pj z%#s!Ng@*5fCe7>6wWS(**=f}4XWY~5qUX}qZEeWmP)oF8r!=+UrTN)^=SBV6t_L4S z6q>AG3tg&%_5-CtX1_z8wd?E8C1%ae&6U=edv2Ad+Us-v))_>~=8uczL{GJFaF?;O zn8$`+Bwn1!Cx4B3OI$f+>y#|;nT#6RM0?{@;zka7j0#Ky>&TP{0lo7jUb+J@cLhM$Npf1n2`m$lPp z6<-wPT!C84i`sOTR&J*Snc~h%u^_jzEDgGe4^WDhJT8-e8_$W|Hg{I=($1nyQ~DBU zc=wMy`v*RIADgr9X%nyy4hj$5jyegJcL{t9>3I^qB{%5HiPR`K*id>YJ)GStzMPP% z6M~!6k7Z&!m8L|K>W3(z+qVzo@PW!~?S^fCc6J$Omp8}2j?bN2B)z1UstSUwtK>)D z+UkJJnE=Ll zHYLtBsS?!WJi*hBl0W?<3%_$E46nkfI_*#Kic&5_bb-_pJ-&?tMCyM@)bP^*bpJbJK}! 
zUD)R+R_mexe^{DB&)*;v0Gr(q%5k{b)r$^11Et1DbiMF&inD!EsS>bs zjW2pt+#f=u>t0n{xf!tc)ikDzCljBN!ZZjcrJ6{lpt3_TqcDNQ|8{(tPK-=aY94*l zAg34W;-$?VvP+7T=PguOBg=FeiV`h6G=pWQwHtxW3cZo(sj=LLp0omFw<1%Fb2%nK z9L-HT^cSr1vXouRAfF>c*WM*1IY*De5uR(Iq`9l**PuZA z-S{!Ev#4G9KBkQJ8jJ96pETJ>g?NjZ6nV!ue~AC$2Ek{^ZBvW2?}YsR!K@S7$v#s~ zor+R3cx{PLb4|Ynj1x8~HOSl{4c3xK1NSx7JPRu!v)MMZ?iDrDbhW(^knD zt0Z;u(QM?eE;=_&yofrO4NieB;R8ty7GggEx4Wb;3RU(JE4T3>@giZ9HpelblQ{+i z4dU-4*vP^P^w4%-iy4De(2tf?)eGn>l}2HB&Q0vU#oT(Q$oX?28&dVP6{9{9wac{5 zt#J>K&6OM; z0cy3XwpZnq!4tNfylRqkX9!r!)q(kQJgYLWznO7Zk8C7e{x~sgA>A+^F9>7E#rfD_?X)Tj%SUTyhN=cGu#wL z+hbmX$suYUf5aP@C{}*EyYk>kEUJsvwbDDch*El70T@=-1d}947(-I2vqc}^D$Ggw zJY!EmFIV@?XDpC*2%0~Uw7_a?F(tys`L7q(930M(jodr-$3BtOl=6{g4&*y45^oXOnPD6FDR zFX7ogl;pa4-DX2u3WMu8hl&B_1(3Ootao+`;?bj1dAju8FO6)vhJWoayCs0A-EQ`B zV4(-E?wgQ%%26FNqqo9g8Mw+|+b`QGZ48`@y_-No_|0n-rwUtR$s{KwwNpbpMuCfA z(+8nN_Fv9Qn-eFv#@QDv|EOuKS)9`X+qRJN!|zvZ?EFAqR37&mW;(!K*_7e9xQyyn zt1-$%>Rf2{7V41_lb5JG{`YyVoqK@P`-1dgkxd;>U(diU1i{@-L&}~cob4`M=dcE8 zeiC3_btd)QL%K*yS9Zc1S2YLt1h;Iz<27bZX21TJIy%BON`{gy`PMNvLIZYwfrWjn z6r#FiVvIAe*6M9{-kwTP7?DOTThAzVbMHSQ{gXZm4Pzje`QKkpkpT*;gg8#K052`` z$Qt`*WEdVq7EiK45!BPf=x>&F9ZFJ$Ne5ZND9;S7)i=mBkG`3P=i&Se=0|*pHeNqz zCVR}70_*s1Dul{RPTv^4g(k7B9c#qX#BJuh+}s7b&ocjt&7x$EI-|dDoX1tm^vxNI%(52$O1IU?!~$a}1iZHvs1{H?iRM}dAn^v* zEVO_vWVI&^pUtBm>;(uVe1@3h!P*&tE@c-)vhwWFnVBBGbP~c=(d_%}UISx^3~*j! 
z^1@s+B_%UZYfpY>VwJ^)n36$e5IRr9O#O5(x;`(d@2iiiBi+C(WnMDEVu8Z)5O>;v za1abN5FQG%cI(9#G0sMycxZfCM>V^@0lJBynhs9W%pmcJf@alKEDpobKCo3u$e~81 zRf^d1qC_-ZxBQ=bPcB2s2{#$UNQX(bCW;6VVS*(!-a}vX>aQkzC`+W;+l)?JC6K9>B8TFW6x+Im zrz9V+!f#R;i1hD5nGaLWh<}|Sjno~ZLz9}kq`woy4g8yig56_X z-o}OQ<_+ulDi_|Z!d|NCQADTs&)UWTP2e(@Sig;(N4lgO{>KUiu=(Xys>0Jo5oHxG z0+t|+&MoinN1G8pg&UN;un*a0JB&lTrC%1EV>Ka;)K3{>uFT1or4D%D$6144Z{#lVtz1D7zD(o!s3LyS^4GZ1R!g!v zep2jP>`9w>socjnE_{=sAgV6*Nrz&ZHMsraaugg~H552n2?JID$+#uFB47^1ipcLv z+UM&4Qm_Uj+k?hgpc-BQqIumM=PIOjM@miMdW!q7FI^v)k-Xbqnwqg%fELf&hN)6V zZ1=qL^4|toS^2G0WG~|)7475S%||#sY+_uE9b9JAqqM3ASsg!Kbj+%r`aiJ^u;~=v~<>J}g|Vq!g$_h^79Rkjz+j z#=J}kLPEK^+9naVO=kv`t<#ZQCUY-5S|nvVxur6;Q74#SQNFJTRv53;W;%|DVy6MP zR2R8x=PDd<<59$E78ig;rf4kr_ z9C{_nC0*5n(l#s;!UGwzYBAbO(Dv$Yhken=cO|4SM_x`RFwKsqiN-%u&kax^2ltI* zo<)D~#J06iIz(7#YJ8;wAEtX89yYGde2thadQqxj zEzYdz^36Hetd|s;Ol0?h(qGqy~*0dOLz)$1E8CeO9T^Py6 zZoOhhKKeUIW!@8huHz`qjyIBm<{@Pi;qM1Sp`5RUlTvWIVQHC7)f3H5H6v-9KMzn7 zmxM#RkXiSo2Syu1l9J6`Amhs>qs%cC!FzjBe6uN#2qdwB4VuS+qqxeSB7M^&oIxt_ z3c~zF%4wHI&Gi}o4Frh6a_gVU<4<|3Fp=<7oNkJI`>&a&7SIozDx6Z|#V4CFHZ$!9 zOdbv8m1u=S{QsuxC}Vd^l>wAk{P0(?*ezEDr~xO!hPG4#B1Oi=+$c*HUC`+ZXu9`E z|M@DjFXh=X)8hKaM7)}ca$ZhbUmi~RPX_U>_y=!RNCD;)WTLI`f!mtmaT86Ni8u~A z;nF$t2yM#OhKdg+!F|F8T$^LuKgaYZlTtJv(0eE zY<3C&oFvjceKGlrDoK-5Yn$!VFs7h;(9$W9L7_a_Rr*OX_1di=9BN+ek< zQ4+b){cqvU)*=J>f`}bK%OQugx85H>NAwx&{VhW z%|(bgSv*B|pkowmy`WNq+bTSvKA_wsHf22b4`qXI=%cs{pf0-4!}7XK<@d}bB~C2E zlUGE8u^>Y8GK__--3^ZpLS#*)E>LA7sWLPbCcf_Wk?q~BjM|3FS+DKUY|+qPY$J+C z71$4=a4QafMtP!tS0k=`ZoXJV^5OemQNMsG-*U>?9EnG)-U~5s#(X5aMCI92#JM2c zv3pvP{8*7$`r-gQx@&A7kLG^m#C49$8i!*`%kZ!L2o01wCrE`X zN@gtMT2j|^0!(&c-6T%g#-}Ix5jHVTV#=iNB<@!D%I26v8Ph%rRO1EL>e|)KZta6p zwflf$|3wGDqTY2$4v7A|henx%w^$1#g_JkV&DDbL&%}y6Hm}rOx%3YSQ!c9`OJtle zDD#gU1oKDjqQX*V=FbPUV+}0UL8kPw*N~OK^gHydfuk51hcU5jE**&7FT|kQ!79=Z zfdW#v-6`NxtaYllT8;wusG86b_$_IQaWoH%dA05<$q}k&w{D!F`|GCd3lWPc zj2*SYW7()e%IO4ZEOjXI2CSg*wzFO9-Wm|*@?uWR8&H52sZ;aDQKy+nu5u2aQSiy>EZpSX}g{AE~g?)>| 
z|B74ETj8G$h{gB&(AQ(a_&*`jI2}m=6g`eltCKh$SyY- z#^}I@sohpy2oT`l0 z5z{b)pM~Ni^Vw5Z5Qk~E&T&s(rw`QG>NAE2O2_{Qc)(#=efz%c=mekapOpygV?>); zU)w>0_ZV}cmKTHGD0$wH^gQJ&4XmUWu$Q2Cbcqs*NNC&uhrC;IJZdrd2aQ=IcK;%~ zFYP&ClAoN38KyNsVteNWv5N1T?KFq#{#>)`sGR${o2GA$?N-&pz41|bK4_~}UmSR# z(t+WrXs*dDZ5^FjRt(vH^@eH(tqg9Y1Amb#bo^aZBiedlP`9bC>-H`r`?pZ>Fc_b+ zff0o0ENx?=7%P=4s{$e-{Ww#@d#-M?UL%NZI!7A(NvX<>ErbVnkftU=NNTvI_#siVetr{ zF9Uju&VK zRc=#!-MNK23K(IOUYv*f2YJqyM2sH_7h6q%F)syz5gMJMf?VHh8r+|7yE%93(g{xN zqIdSpXT!h#CH^ju2=zLNh0~&R;DDn!U?F+qJNK}5Y9V>UId`kcOL9y*unDUwBjQVl zYdtl=;lv^P3XIZ)6<&mU_k2+E6rU}WuihT3ODMmn^4U?&H07`mT@3biew3=} zhYi-My25{dxJKO3KUgQ*eR+U!%j4eDw{vH@a|iC=xjg3KO@X5QZBhS|oi;ooAXNER zXp~g%%W!8q(8;mGZc$f4b0B_^cMy)(PE_hdbDf=QqDuQ-U)az|nTANvr-<;lw5=%(W}tbsWEz^mt~ z)I>RFUHrb_cPW-F(z_WwPd}5mKdXvze|T60Ts=(kNF9{v`$d%NRzb%D3ISg($O9Z$ zYu{~4D>GG1Z!zGsVCju&t`kTW{zP{Gc$xg;jRgD~t1Dx>7@+oY4!0D%!<%T-)MHi3 z+7?Tc^q^!u+ElD!l2D2NXwDOxa@i+(bh;=^V^cAhwf})qzZaUE(UQn!k3glE(z;()v<38XYjH8FIQafM z7T_=I@z6v0)WjP4JJaAMtK}GFxz}t8JA1qmyEpyW3YKYc7pIQEs%8W&zr!C3`?u%6 zlU2NPdHB5UM=NhxX@{Nq*573rhijM*ZzKerF-HE{AC3KSFpG6rMpb7K z>?-7;VK717pDK(-;eVY?X`Yaw343>plA4TV`p`q|NXh(Fn3b8%zw zHptJ}M*llZtX#BJGW#LcFjLVkAzZ{Db-ZDVi0j?7wt#=vJUK_Y99B7;eEJ1vHOGjz zdyXVEzMvj`QN7`p`P-L}H#PpdnCg3fo&oM&3)dlJW~=a|)qF~1dmv>mTiQw4vc2=5 zkW{?R>2GJnkMi?A!X3L1|G+>JFq4Uqcd#ZrAAzEJtwW;wfS<_ zAIpy$nb+t;^)ZTjjQ|%_=WIp(fC&DCzRwK?ix62FV$U;hW)S5X+l~2WWE#`}?Q+tP zxvY8w7IlYot$kDwhpU$X@chV6B0mi2@W6WrQSpzm>?G4h)hTEBOV=S_P0PPRn%C{- zYkUR$GhEJZll{$(_EqLS#z}OR9(2>@>E)*nIlb+ z<-q$zriGic?~4a`ZwQkxR?B1NV89!qA(pZV^FuE3&s4;-0rN=_tT+AE0CYp?awZh` zUnf5XjL~^4W+0YxXVRG(HusLNYU<>x)j0L_gGqq_>t>VdNeD*VN24gdw$IQjI^MCz z>Hm<;`CX=VIq$X*7I#>ROP)Hz7!IgE0aOnS*{0`&NRKucwN_b&d}@X!R0)k}-jxN+>S3olkEj&> zPL8sF#=IBdK38dwb*KgL2Jgj6F4S3dglJNLDodDWx+*Mc1yOmDek(+*t*?^_Wu?d7B;iIp{NV#y!dz%`sjeb_7Br> z)r3hcS{;j~@zcG1yib{Ah?0Yon!;h>TMXSCJra~m&tx|%AQl9yxo^pD>T>?02;2$x zVTGk(lxK&z(Ys=i_-7lIIqlJ>!%$-wRaYDK>8vWchAg-Fu7d9$3sDnXk=GToe!njI9JQtz;8+Q}9XuAu;JpN6XF1YF& 
z=PydBwX;ab2>L5+ADPi!;FfeN(ZDQpN$l$7vC|o%~!QPI_ zX`2f++sO8^Rv#a_YG7yz6(=^TXEsGt)j8Mr+kYHeiP_^V%4zpuB)hUZ(sR5knom&p zHw^~u99kHbPHFCv+nj4UgK3Pc!ZMr;nn$h7)+MrGA?ai ze zqo0eD^S_@X9ihvnI8S;fReb)6+9`}+I0=f0FmuqU_fVK@mKCN!A_XoIVTQoB{++-V zK53kkl97&dX}}*`IEV?dWu$E=-W(*4D|{~ouxmqDBp_6W^WET^cL6QurT9X(uYkSyK9WHiG!hnWi}R~ zn4x65re@OUeGUo{_pIVF{x!4<{`2tt^;MI{9TM&27OxU*&09#d2V%aK^*S05HOMXc z$`uTrAZ^4E3#u+$wA|nIrp++jxEnRzJ9dWNNe{mQU&g?BrcT`pRs(M5D-aG6V0$}9y-{UW@7u=y~Y{+Kl)XW=0C?Rf+^90 zm&oOFBf*N|d%L^-z3aE1#`IQ6I@xKa2UHA21HH$T00n>kVl>sFR&+vi6XGM(N__ynPE_pcm zqSbRN>2<@e*_0(3v0V-j>72K0y1=~&l7GTuj=rQl&BRRcEL~c& zmb7Zr2sQKQc!Gcmv4`X~qE=28O+P7AfeYb2m@!hDE99xyjh*BDy_eZ)_SweMs@;KT zaDdp7czs45gps&Fd(P;QOfKE99Qbx`bk9`aX;k5_Fqezk{a2YaH8fZqG(_|<1X}m$ zapj|KtdG^5o@YM<#B$iJnaYl&G0ivL8K5(vdK^_~QU@?K#QkXprK%S5j6a8DaP6qx z<_Dfr3pHPETNiz==XwO4iz;8PWat7W#)we zpSb^{MgQAqrvoPq+yesyrbYTPc!yH<4SGx9E(F* z;)-^L!syxA5h4klXw?WsO(3DjXx_f9>zN+Hy+MA7Y`k^Q7ON`(Hpp|$lotI zyFcy0o1%Z6j4Ezmr{6^2VRG6FU)N3PX{IT$)(6keLP$o+8mzWoO{`P9ckdlcQ$a6i z$K9_P*4$6LAt4s#^5*la12)Cx$i*H>Q z9vpYP|25w>(E4C*YW}XxW6cbwhV}tpwU$aTKmI%}%p2$NA5Z747T%|^LOkjhMzpvgdp%tKL_JQ?>lDsx6(gT+eZvDa{f9A~bW%TOwMd^>(X=v}L?q^(|IlLnz1A^tOFveovE& zd!}}a_vn%%?Xw$ZhEB^_{@E5>%UCjVe_purDu&?d3R z_iftoZ?w+tg*lvn;iecreI~?pM-dF_cAmN{Y!oNl|*{Ahz1HI!wJB8CRQ;V_>H%u~aqaz2?;?FlNOG3VJgb{wMI3WhZh2 zlhl_r_2OwSz;(28%uaS!aVA=&ioy#Uo%~FJnT-Hzk6CD5PRhp(DM})`Niq7HzTMgg zQsSCSj{UyVPCPt7rixR(xG|3}cy3u+&K*oGSF)5iy`_r8YQqHq-hAEG4%*g5dD?z` z(Q+k2X`&Yfdap#&Ty;$RVkNUNwlg!tIiiq8+yUW#7Rx}9($|*(&8xFNp5P<7u_%>@ zPw}s*#DTDfWSbISOST!)N_9>t{1!Wz?129|>+v#Tc!2{Ef^t)24h#KbwbewA#XSa~ zYYmc^Bj1;Prf}im=f>@C(`{U{**Y(>C7tv{RA5S=y2hAJ?V`&5SD{Eqd;yS)5I{vQDjh%b22Q`&5zqYxb zOPQ5#<`Z*iU2f@hMKzV{Mk6_Yrd^&YSCN&EdQ%B?|AKYq%WW`#=bGn0#TkL`?abwW zK6l%*?8?`BLfc+J9tgnnSfw4Wrm%*Xa;54di0-r zy|It35Ka&x2;oQe?UAAjph)pcQM%u8It8`)!s1zb_f~3Rnt(_6vd2`dM6*J%&66$~ zi5~U~l|Up4lb(?}CA*Pr+)f{mmJ~jpDHE z{0+AUl}cn;Hq3(qPheLusc^lm9^0*2mqL_5wE~gsg|G4TjYm!G_CGBN9H`NWM_`E} z5OibmMK#=+^gB<>&cJ;wG#tt_WaTOH=mURD355QqDB 
z5rb170fSq`?}M>0Go>kQL>hQ^Bzd8&6#ZqzoQ)3i4qpC(gQdQNJBgMVrlM{gan2+_ z_U0gF0NoKLn+q)vcUThIJK4g^nuMk7a@V}Mz9>yP!A&nFnOb>!?cz`$JnxRv zaR*{WAK0Z+wN6gh;?)ENQa6;LNNByZquuq#0GD6Pnll@X(DT*1{6HHGR=H>$ef?GO z1V_Rjd;ueS@)>Ij-SslxEDAhVNMQ;z4%Ym=schq~0&}Dmy&gWW+zC(bB$%wkMFm%>}$nnjFRBbX9 zpusd^?y60k9C^H`-K3U9m-G$tAF11>k<3!j&b?It@^7BxpWMa!-?oc~t&Op{jfpLT zuCBR_xs$H$KMSTvZO3wx6Vc~ImkAP>-C4ByNJsdkm4vj*j*PBRfv%79*-bs#nl8Ti z>RNXPOX_+3f)rIAK}X|3gyq4TMTbb#p{T{PU>OI(JgM&@GLqxu@fBrvYr6fuBZMay z?|QCx!apZ24jmCfqKrohvU zZl_cFd_GSZPuwyM61tSB9VGcO)1rvS@4R`P(=JrJ33ITT^^-Y_Kjk>Y_)c|$lq$kO z0wXQo4=rqGQtYor1vNBY;MRx&$1OiDuz*kXqJVg zeGaMmkM@`6)cTA5$JsMX&$oRXsj8MHjhRH=;ne(@noFv(^I7W2Hx&R8R)vdl{#{6>SnCh#ca6*@Fj> zrlDT;UEo(yfW390-tQ6(qSM`+Hj}>H?_Iw?Ump)lU3rF>DGAV9dV1hm@EHq_$Nsr= zt{@mk7e9QceX6IzTO}nXXzID2JNkkw;wj9Oc)Wo362N*NHAE}9BY+t^dZ}B=#tWoU zo!?=pA$9-PP-X%4$)mpaq*v>Ot5DZO!CjpAaoIK^MFn2hF_B_=^tQ$`r zPDOK~bSsv6b}$3o0{=bm)x@+;n?)D-R86-R*w&(#tcK48qS{g z@3mWB+LN`f30S6(bh}7#-&Pct%s5=|xJlLS4dFeC*PVEVNhJAc6U?a86UBRC-358dKM}QKicGAP$)v_ z(^!K}xyuayIo<#LkTqyhg_izjs#3rK0pb6Dr`xbeP01EV3^AK;!}P;y#AcyTtZJMn z?ELo-eKZi1SP`q7nb=W((eH@7%&@%Q%SnzMo9o#0yNeqE+b6=WquZ`P+fNjBdO-}WU)A8|M-VH9?EnkziFf zuv_F^nZ?Uxv)LhAG-T(BTUD@4GW6x%Do4}ss5UsenLe7w5}|>uV|Rx)9;>J-I|o$v zz&z%Ru8ZTU3=WRvI`lbg_xtbsL3fI;x?4Rcs0T$>j5|3M*~OPc4ggk z{T_yb)x=O~@H!0ex-wy9xT0_xkd<(4M~A`?eZ6f9>rF;&38|xKLW^B#_rw;e$`5TTgW{$g&)mqyt(`t;0<<+y`yS;Z$4lROePycuy zEqHM2h%*&;A5V1fd6;+ojpl#Cu6e6}cA?seC;qyT!ukWaY{|`5&ZTfLXAEXT{)D`& z&blI%%b}UxJ}QMp!l`es*oFvZuSE)y$!F1maojkISMOdY;!%TCf4K}D^eJMKr&D3y zQ{kppxL3QkR1_`Zp`b&hgmP`A?8kQ8zF$Umy$`TD;B@1V%Z*!R#lzIL{#$<0;`bz~ zJHc=iIYGg`2U3aSekqZwIMHxO_dZ61GqwvbP)HUFfjG6;fnz1bSeu}o1hvmzktVlU z7SrJE*enW5rg3h50ZWKHA63F74pygRdkUt&N|66gdMoWV`cEZrIi_qt;578Pl^v&wJJuDaBU56dE2UQt~-R#Y&1m|aVz30st z+N)+@@Nn3?naBsW=)~n@?3m;-EiL>g8cDjo-qi1~51Pg#6x2_;N>ZwGM?pD;;Mo#($2B*p+@nP6EI z5-xpfXf}~#spTVv%;RtSEylVWYyL9EnuPj_C`r7ZiF|5lO~7HTI!cI|SXA}?#5Sx6 zpQdmty$RHh9O(&%8olEAJU>NreV!6KI+!H0EEOGMxE?dQ2l?VzZ9XVE72O<%)4Smb 
zF&XCRYZi+IV`xPr@?>FeuAYOkUE-eIuLtHRSZ3!Nb4VDFWAG?yCyidL1l{^PALhXh z5Mf52DVhpWp%F~t5?nDVE=_$2OQ*He3&xsSic%@uPH?Yh$)Vo??FU1xNyrS<{d*Xl z-SWcPLxY-u?r`F8@L)D=bhqe%$i&O+mRu0N3#udlt9rQ^9c+JUfa~#L^sxWxB<)#v z5suNBXk0e6ChcAG%zzhFQdl6tV3{|fy`fTcG8eVkGt`4Z@I`PnF8TvI+7P#+YDi5} zs<}5^3=~zB+o-mGy{R@!w-b5YWw1Zah>9sojcIkqv1ZiFSYEHCj#nc(2ZNDmW&8n@ zI`)zbFR3icR_qJl-N^oJxs=GI2{K=eq(bXLx)}V7bMF(`u{#47ekj30{*hp)bkQRr zIX(qFtNGIy`&Fm5#>_V6qMj1FLUP=Mk0maPhF6LUBzqB|pss+Hn*Cx+HVs?hw2-EF zuYkYK!@)NfX=FA0cL`m-&PCON%d6jFtf@xD+F0)hmN%c_aPFX0d_6)c6FreuW!Qso zXu27p>0%t)`D@L-nV7cN_8+0=TofC$ADaDuKlACJ7~7539r_3}w&MVk<{}d)U-SgP z9DM@reC1=vk?fr~ZV`?3hQf6jr+g!KcWeuTi8ZlyBs}1PnQa6^(!^tX>c>v#;L(cQ zytB&mv)oV?J3peNxO%A#FqJZ4OjnWwb9B@L^q*VEH5#1jn{PKz*a8?R+#48(;{Uvb z{zq)mF*7o98q+)JJJ6eYh1ytbY@=MvmQM7bPqBgo}b;^cB9D1&{Di>9N~~XkFSpj zL!2D!93h=~Ej!9lm}730(^VQSQq=esIDhH@oRh2+#jPx{N0xNQ1gf zt5x3=S|;V90o8=@#Z2ov{D~{feV+QSIk51G629pPUH#eO;^z&h@L@@W2^_;y_# zVRwLo@thNv6j#vfL(8q3YyDmwo4hE>J3f1?Tsu3vH#g5)Uwy`W^kgzu(X&GYI4b^w zK5dm|ht^2h3F;Zj*a@PvHKg_MH2wV*HQsI^ic90i5D`WkL(E+iHi9n1gV}lSLF@Hv zNnLZ#vCX}HXtaO$FB)q&)@ysJ@|@E8yn$L5SLM0yrK6j}*6JIsRuI~E?)PLRQQE>C zJ^f1M`9?Ex*9>X_(uLHPx3Q&tO_@o3i(#OBb-X6tUD_nOV^k7W3>JKs0(0eI!RBLL z$&=h#>JANBc8zKMNe#!*jDvVRhVggN8#*PSfkf-=YN{cZYj6sMkj1TdE{IWwUPYFZ zZ$;6~!8QSft$VCQ*HjOM^2CZZRk`Z%kSln7qHpdj*2c*|Hqx zqE32+CCZ5*%NP~%Zc`q;2rSvEeG0~+unWs5dmIY<#1}i)csy2==_B3M}*14bFhPA0R4KEam zgF}kHy_caPP^erx=6=be#vt~i<8cZAzC1cjSfP$^%A%K*MYYnTqPs;uhTfE{v>4(Z z<+nvf1j5JFJ4C7Gxq`y-ji&C{AtevT?k?x>*2^`nDGV7)$y(V6$V|wWg9x5E)wk9- zGrGaFIRE`bCDHZ5_=yC#e#Q^l9)(`koS!p!pT0`TD3B_(sOz0h|6c>7p zM?9FQpJ=HZHCw8)j&~bNb>Gamr*|`hBr{fwlvDEFl;TPDj)Iq4qIYAH73CuhVRe=& zt6@D1!O8y8d!aNk1zC~uC~nLKuai9zS126jHv@C)q-kMp6I*;*skeWnrv?6y-j^zTg zWnDM}_eAzO9*p*F9*uQU@sb1hd*p@5%`n)w0kb_j+Fp5-BDlc)-WH#mWfd|2eNse; zv2mQr^*RBzyqA=N9J6&9{{+JJ64rZpTEBy&P0_$h&)6s%7AYT@y{o$&0h=aU^Or$& zd?GX5qaOw@_;bO@d-H&T<~Sdxw$2zx;L{!bdxiE*de-L#m{?JkxD~`gklzQq91@Z{ z1X@6-LFS{JFvxUMWH7})-pnl45k7!BG?|{Tgc;R@;swk)$?e+Xm(v0A{wRNI5M?AG 
zGd6Z;aDdhzO__;D_>tl}ljuGIyL73!ALbXTCpU6Ykp?4C_3x1QBsQL>9=}Ablsi2B z13Wcf^Y!0*l0(*+)_FJQdrK-bbpXeoE%Cz~>=CoW>o#=@v&z!}W!J>6&#UM8I}nT| z|7x({Deb$}_Oa&UCg`R3fab^E`>Z4gjR};Eil+rUC8O`fMAZfj&L(jQ7|jy71|Nh) zjD)k4#f_SL@vv5|5nFEXNl-pO{amHV6ue%I+VU4^F>9^Im)8Z1!(QibTQ=1|$Qrlj zwS8^lM@jI3Nqmb@Ds(&KvG7X0d3lNhSRR;rb~E6ea&GRKp&9`Aq$3tDdmH2nAK|_1 zfHo4L$g>e4ou)-*?}GZErB7jv%k+lXjSBrzVG` zpw^=d!C{$7Iyk0!Qt!x>xAQp|q^?Jwuu$DEdW*%b1%GitLf9^pLag&Mj9gKQ#1n{EU)j+2*x^g|&i(f~A?`U;x zdg}c6#0`~SUkF~gk*TO@KBWv=Ge-8d&|f~i20iHjg1cjl-;_bejoE!j*-GX5h!@+s zzbm^Zt9VVBioV6ne^gEqml_2pSXwlLll#lB&fB{KN*80rZm1_+AFz1N=e@v7CeAHq zGMOLg)OA;QoK{~GM^vH=a5yIr;I@z2G?FA#adQqz&U-AiWJy~1+rq^(Wh1NFDm_)u z2vYd% z%21V8)Rog~Vwu{j;Lh1^oFL@wMT<1REr)(0XnkLz#sN*CHo99|L)|g#OCClYmwoP$ z?%T#?#y&AlUoB88^M8yVs^{{53acE>MU^*(DA{WR^OH6xt)S~4PO8cp%M}bCu=xo8)uC zJeeG$8)H)&V>26L(;H)B8e`)cV}l!G0~=#KYh?5NVU^@!N{a4$q-u4>0?_%-Z&U+OLTrxVwG;gEK^V6CWWH;Wd zw->{n#h+btWnW!%5!=pH+s+x=R$ZYGU+##nQh486G3UuN=F2$c$~YCsJQm5o7Rh*t zrC_>NQ3ca8U1%y30SveR26zAi9KgP7`=C(!pndzG#&t(FNawA&&MP^?X8?wLAA~8{ zGxBjD4YKFiT_4@sF1^>S9={tc0dF9J&NzhaULeA4U&-5UhSx2O0RyxTCrx+?qkwkY zpiC@*jnw{8KLWyW=*ay5%j;gz=PkO=E0BO!IzjgsLeDM(qDKsU+qkNfyk@1{(Z81G z_Mote_~1s)unIv+T!3&0B@!{6jhYB;I^uxrACf5V8XWwcIVPguwA)gR5<39jD&Zwv zmp>(w_x3p~&8k~4eOoxCyR@b@aH6CW6i)tN>|@`ZPWn|E*M`k-f`2uo0f+FkP4~Zy zC@nGCrbK;qR!#Po*qiq4T063LZ*mK6&3L#(lP%=%ER zwwJ;*l z4gp6sp+R%lmswgNE_uKWslx3h;wLg5eBp=bF5oqoYTXH5ZB#0cK9>)FJIeZ zQ$7TNq{g*j((vHYcN(n-2RCg(bIh?`J>51Ghd3bLNnXHP40YN=tw*%mQ+N^@2?XYM zHsC8^U0eKZLLNztr)bdagKFck-E5@~33V6kS zgwUSrMcwKo`E@5(?yfM<8u+O>wZ&(s4LL>d6`I9ck!*+nNXZ6fPyK><`CvzWq)ZN$ z!0KS@(7b2%ghpM1!@@BS&2QAW%%lo~0j5#8_}V4ab9dh}?Y1`dPNx*Dv5`0BAslrj z?co(ggsSS?Y&hft95$;pmzNHPW4Lal>pPkGP8DmI)`$<1UTj}%htbv%(8%*fta0+b zl&H!5LJb$NxlJ8CkTqFT5iK#ReO0U z75warYoRh;o-8%;m$}7bc@+bp1wm^iVvP?cB0D;j4%CW|kiO_-Hr~9xo(N3rPnl%_ z;q&X?bz?f)uFJRcb`wK>H#?ojJ+|OIM(3D44n!vOh1jztm$OA2TP|Xk%p%N$-o15} zgx%re*x_-T=a5`onWlP^MAe^P(X$?1M{fMa&yTi#V<{zjMceu6U$G2frZJA@rt7e2 
zyz)K#sWb3}i4!`}ZTm`V_Ei;dO|DhUrx3~)iP#ZU$Xd+Y3{{Az{Aq2X7J&Cf58HC- zX3Gb0Nc*ApSiMB`edl0IJfmsX!I3M<+y))7?+3I zJ;j~(K3cu&%Zs(zHu1n=if_RxT*Jk z(Fgb6B?zg4{;I5mu%eF`a-c}fqR|UVE4KNvXeISEsMp}0*&blgF3PD|U8+N=SS+Zu zkki~lE?PWDn0*KsRAJe;9JZ)bzBW8&g<(J-hLCk*MK^mBff|h}ngVTJee)ZMIA}f1=M|)rdEx&B;GDGen5` zsRqLI%?UhHrrtd%b?R+i6Gp=H#jQxTb`1yihoWr_sn9v>0epQ4m9A_o9Sr}%u_RX2 z<81)5`txbIvDo=e>+$rW;8BY%*hR{KA`AxmTmw2BN{tLQJVFI#7lOIr=U(~CFP2#E zP8eBmF3XB%vX0&F_pXol*2R|y5GXMYgg<03!YaK3)GwzlrE@42@Q2LEbHJxsZV;*U zEQOHFuHoN}i>@YWgnjOfUluK)wOvofj6dW!vfhOPdXdhtD(7!n;Y6c#?bw(CrO0`U z4fiTE8`C^+i@2*mvi3%P#BysL31^Ust}13m8%loyrVU zJbtSR_Jc_GHmpN}&Q;!~V+;q1Lu~+w!%9C&e+<*N}kaSloyy`4b4&;tSUMTN~qStl-ua_0X4hqa`L6ult7}wnI zSQxxQ?2>**2F-{k0(L51Uc4Y_T#@I~JPYhJW4<^0&tK5t41^V( z3pY0s#%&95R+U5yM0i}_oJ5h`1|ewT_9K)`JOOZ!jtU4Xx(ke;R;XXOO|)m%RNp*! zYPkqLNsP6}2(KnX7S#v0)VVY*BZtrV212y>b}Rw*4ceY~EkGsJJbymGnrS-VGvoI- z8J{AcF#nm62f=2&dg!>P>|W+11wEEfR-EsVt@)v?=R(CmzezH(`*yF&#brEdT(^y# zzImS}S}Jy~+fut#U?Y|H)iFhl7EIg109eCHct+hcg}kHM6ARPDHNR2ww{Z-A7u~sAlMvcxLD{kDB4S!;XFq@)1mmJ4=t;wMB{(#XXJSR>Zp@ zUWT;5f7eM-w*5eyJMa@gU?2}As4yx9b$bS+m|s-h2p?Se!d`I7fb?QW6@4$*Pa&#< z5KC3X^E}@VW$r&R2D`R>DHdicu+`*c?h&vB8z{X6DS^rV#;Jn6*ga;6AzEz1C9;qG z%FRRcid{tsR1>9B~xF0d-E!6k1y**=TCs6eCu_4VQaE^AKNkIk_OD;6YZ-Xi0 zNbYqtnd#jQpqMdlkye#V`}O$|^j7l$;UED~PLsJSuxSCsvb>%^^=`%bP{BE5ret2` z&x4lSSUz0xg2Q9ybwe+A!`}hTf>Xyv@gtEsvMA=rZGXJ4QPAFMcmnNa?e%wNaZ64w z;1;$`0B17(3uN$_|L9>nX6nYok)E)OKFP@hAPe{)ILX`OTo67?Ubk#zUjx})If7IN zXy)pE8UU#|3wliB0&RZ)_$sb|&?gUmaw#q~`x$pntBu<7>&IZ-0{X_1wSqxzDZqAh zrrkqufHlI)(zTo35Uil5)prRS?j9%{v;(mTy(T~!#qH{GW_p|1mbXE72M~7Qcse)!KJLudw;pBz>lYSIYJ0RrK(%=lLbWOC%w{Wi`Rr0DS@5O zOpegpkqF2jja3IU@SOrjT`vG(s(|6u6@ptgb^;ZgEy5d2;zHD7{G1p~0|{bpmZnt# zsv9n)6^(JoW+!{=GVXe?OKTnpAgdqKXcc>!pN^Lp5rzXN{n%>Xewku9KCD0O0+|!o zx{|{NTp}9@xqv6;9)Oqq_(2BI05c5O#$TNQ23QCy^ik~ILtB0&Mh5U4$bh<`8v%tW zL!7rP2Mnx@KTC)2tn&i56(WP$ua&^6C3)}5>NZ_ng!REC3cD%}ih}@rFO`LlIt;ERBRo*1?;%^uIP z1e6PP$zlnd%|on9iT1SLYY$)qttkhq!SR2$73=MY_VmE0rJJ#t{@H`T)k(8h6PMA` 
zicY58_Zy)dJ+2}-WoY4PrAxn}?%HN*Rku~QMU?wxd9NngJQuBm7|1Jw5{2)#%VgY< zh~{*>(deWxR)80&B#4Vn_bEh;FXnDVP^hrGEkO7W1wPaa;jGa zH@+betlv%zENuww>1}#SyGMlN0Yv=!xF4-op6Zx;2^w{{Wv_r#U^R zw`kn+@oQ9EZ4cix;)$rdtmw^<+=#`0-XPfJs;Vw_?9F}YEWf#!B9=ixs+yF%S8h*I z;=3|ZVxau}CXKH!7I}U6*#jA5cP8(>M%602C3ReKQRM>}R5kboI+oLRQ!mY0*hle= z;RN);7n9Ws5>JndXo;j0k7wiq$RTZGblZ56gPQOFbimv*%u+l#@E{DjXd) z4@jL*ZSBkJ_7vL`d2zo$s|DEEN^iJWtee1v5W%i|GWc(5QJ$mxwR}U0QncaWR|rfB z_yKh5MNY`h5;2xY&enTa^H(vORl(o_t{R4&E;awrrUg?%+~)cS>Xgpm&RZAtt6g`@08;+)wKOGk9bj za#~O-8_|~eLl4^f26s_t1A^KRLbp-D3;zaAePp`V#9dJi!6uc8AlD13na-N)*2P|@ zRvk3i9;e3Y1PZ3OGf|(^=my6b=Sd$@BQZCQQnkl`BE25Id#EuP~4=L`G zXIKt@u72xGn}hR-XuZ!}=l8acNXjxvjSftJI~EpA9s>nH!R`i6sM@3a!=^f|kvRjWov6LP)2FReP=`Ccar|BSo$_VBH(*G~ zc3}F9Js{z&C>FeXcMY#gmUzDl)O2g@SrIazN_lIvnzxgYVz2xt*_OK`a_FkKw^gaL z7n9Y_b@IHX@*QC{H$GIjy{x|%8rsmZ386T}hL+_R{Ir*0ka2jVb=@x=pXA)ddd6F~ zXk8?p&OFz`@6aAGX~Beso`s74rMW=d)t!?h>-hLS z)#zf;nKyW#M6dM?EX*0f0=cakG;^A#%#|Z=gdKN8W7ks?I40UcQ{Y%lj7quK|7znK zT2{X-O?@l+n#xbGty`h)7$uAV2sOSVMva5-rSD7|V%d;sg~ zxv{9<*`EQ3n>1z@HZ*w~Vm=ejZ1^C`k$g5L z0rhg31O1E=)=joc$&#Pn3ujXV1fhrLjR~5^92rVOaDa924VCVHCJ^SW2K5Dl+uu&~ zO0_bZw!M$Zc@vUb+J!H9K46j20*~!kvVFf;&zOcrw8CD5_-X}RzLc+I$;gJziW0tr z#B-Kd{mU!F(BO95GlKSu3q4LN;%`$F60cXEYacxF;rAS-0ceZry=+dGl7eSFlNX;n z`i2H}j>W4e+?r?jouFh_P#nUsj+9wwpH>Z-VdEn*0)&B;(l5Yt<(LWz7Mt}b+35|vY4mafvce}M4au4wkX$Qv(s{QOl8Gujag9o^yWE+Xh zX@-EiE3&)rrlPeb=I4Acr@jm=eaCE$hB=1>vn0}slfqG2lsOJNjcfV4!pkr#LJ=!L z(d8fs*q6dVQIRTzDSq3#QnTxgM5<4{ijSGj8@1u~smQh20AQ|qLC`%k#h8+r+8aVS z?eB}OFaLsJvzE4OZf6hiNTyQW{bSms(s|g&ugG_OQ!@@jv((83Ii$09-J>XUSgzPu za(g>r&kNoa2lVQ77l;E7a4_Z;Ww<``2f2A`)FEG=mbB9i4R)B8X>HNPNY3d5Yh=-^ zmDOWw->!H!u8{=7Z}%W|KDq_M>lhIbM^1_6pdG!{z@}Wk%JQ=b2T@zc-#;@cp*1eU zx5E?&#@E$jo~$yh#Gm?Akt=wt)H%eLH!Z3eVgPlH5PR-d+y}AkPR#Rf?`Z$>ajQVG zKhCS}RmHsd7U~=%7zL)@;O(OiYHi>FN$ z4QE9mha@lu#v%w}hKFzaFku7mXYaq~u@pzYL`_dMMhEp`5X;pYo{4DqJJ2O3)Nc=? z(L?|E2HBHq)WLV@qv;B5duQEXonmrf2670Sk@UNn0F+s9@CvkrNJ63+hr8UI==!G? 
zG8Iq2Jf!4k;}DlIY~V^XC$I?hq~{|AH%Odd^>2$W!k-JQY1i`-8fiLk6v`!grRF!W_A)*;dW(jewwYC7tF z?L(cHleb%(?(8u0RPGIZD zz(=1dy0uw%j^A{$e@flOTIt_TwSPJfP7*0pF7QwFk2}Me21)~MCmffKSkeW(LT5k= zHNnFfd^YskK1@pM^mS}8eD z+i-lQHu2FO))+iQX?duVsq|F+0-QBPZoQw@OVX;i_euXtbR%uF>lm-DIwf~qwHzRG zb(NMdVQ{h?&oN?c<4pEQ8fkE=`T83qD6L-@%lZN{K=NTi6gi#_L;^eDiTiq@uK6C- zt!L18bITkw#w%wvN+=FGrf1rR)ab#gnd-);{4G`Jqh!++zu>wTwcK)G}kaT`7-m3NIrJX>?v%}pBB-1MC_&T>T!*=QJQrVpkPuFRsd3mup z_ypj2V0|<~#}~82k4vt|-Lh~-xjdiBUqe<}c;l}d2eYRHhY&pmQYBtD`kaj~7qVR( zBImK5^%7f_``z9&KT>S)p0gJ)695hfnDEF7zC?l3n8sgbqPgVlF~6P?Lb|cq6-A$0 zu2!Gm0nsbe($WslEymKcCG1QY4Q}#vE|{>k2kY}B*oz8l5J?6GTEOR+kQ}P6jV5tr9;PnGK+5OUFvQp8q_I%|!Q}a<^&DPvW z9w?RAsDQ#IL#&V$4uhjb|81T1skc;@VzAra>rEg z=!Gc!Mk5BR4Dm$#zMR<>J)%mt z(@Z?IdJ~C{y2+oa*>`L?JpC>k!XNqs)gef1R;M-WW&BWT7nhUVg+nRHk<0pHt?BsR zXt4YrZ=Wl|m~K(ytX4RB4^DRObr{9WjMu^@NzX=Fnzv4`hu}{= zJq!~VEG2WAKD~RHRb850tJL*v#c$6DBzHMR_Qcn#dj^HKf$8-QA2+QY1L|ou!Sti2 zS}yEfOUt$f5Rv@bYbbYkUSS$?Wj^*)7B5LQY{3>Q>!QPpONu7d1E(|dloKE7VWwkdr#Rx8XR zNu78-9v%2kqmQvyoHO}pE^5#p2GO+!?v`BirRrPyZ12QH?vgxUo)YZW!8K)eku!-f zMTqQD`StC~MdxNNo*At!U&6ZyVJUl{}u@e{L(5TSX6V%hJ>a z;moUQr}nGv&SR0u=$GM}=o{f*=WBcYLv5{FV$Fvl8^czzAonC+zw@Gc?d8TFU6*hNxCZrDsTJ1~hMvTaQW| zoFKFOZm52BXX5R}1O>}>#E>*Qqg9rEzfqSMmiaUDLc|h0!7%2xxY2LlGCWpT|rpalnZ66#!vo10urxW2^8G3_c%Ix0PhW^lj($^bFYm=?xr;ei$huyq?T-Uqd2dT&R2&IU#TCXEIohN^lg#Cl8Rm#o)X%k z-JNwbo9wGae@fr{cYALlGeD#%T9h|nKM1aQ4{CW_Wqb$p%89Mhu|A*j7IcL=j)HeN z$Q8&_M{Wj(RgKPIrjFB~r`omA3HTD00UQWs_;{2xxLdeA*Nph`1g6ZLam+T}q=ps^ z8M?=7nW{|zIOolRY2)&{7ynQoK17Yz3iHk!zI14tiQlx5WJZj`>e&|3w3|C*u>6%Z zcPnGJ99|ijUHDFBIU)2oESq{SlQK8y$2>9LyK;iG?Mut@Ln+p}MP6bL6yBxI4mRJ0S;$Zo;mYzpn5fjQ-CQj$w;jYE_&k$1Hg_^=!q}I|Y+lqy zur;p=I@lq|47rw6yX03XUcHO?;(EH^akV3`nf5AjM``OBXHSUPXGiRm?9C>xoPlKJ z_5g*XOUhf%9Wo^So~0>*cxrqSUa~*v)i5o^3}2KS$vdf5R-M9BU=kGI5KW43&{~5s>!f z`EI>+ql%%Ty&M*!8%A2%>6m&g?OXp`gUlKTNH7}rcR)8z?JLJ`8Zs^r*q4EuyAH>3 zZ|rMoeySOZTY4R8ekm#QAk)mzmMDl)9gWzT#TrM!Q`uS4kQaK{S(kG4p5RPXuPSNy 
zxAq@g;^MXJ;pjOYj#a8gSGm*vm{$E`%!r@vm%2BX805h7QCIQ@t@W=QGD*0Czi0s! zlsAQ{lD^0(A(qHp2>crr>DFhF*D!WF%1IX~2UJvz0QcMRilO83s^+yJEYG<<&z{5t zGg*Lsm1BU%U7Ew?@C)DwJCOTD!rL?@ql|1(g1-jm+Sp6+)f+Wvq9__r$n-H4695sk zE1oGO>%lH>sM4TR!_Eqt$MU|%cu?t+Yyh@%9u!@Wxa>U=?$J=WwxtcGA%?|EuOwym zVEV}e=VsS}oFY2Ze12asTdnbLimJ1dSH3^B&CVAwasTf${)3bcWui~9uTL}B zif}yWV7~8GzQ3&vsLxmS?kxG-d?q~pc_+|@4H7{!{Ace?6tPs?Iy;|VVzARp?W3X! z4ebg--Ghk%=v~3h?u9DLBrZ7ChQ6$w3P>2vPS0Ihe2^I3kjn5lYZ8u5mtd!)t3kMQ z*Sg9J?oYUKK5)Z=m5sAvLXd`p5|CRe4ap>QK0aiK;53j3wiJy%lA4?DTZ|ZH=iD{< zvxh7Cu$z!XS&#>=g+msNA#J+xubKoK{LZyDpf>6t`npsQngX-f>))O(>*CyhYPP6lp}qQPyo z0%HmH`43q;Zp?Notdl#bfG_pl32@weJq;un|5-7WeOvUfu{yts^;R*WU-!OAr0PcP zd_7QA8TUa~(3WfYlp7gST#@xif@%YYdplASUq*#?fl?z!yZkEllUg_u#VPyl_denv z*zr-onw4fmm7{d@6s6%?Gy6lWv?oENGk5Y*5@)-YaN@Gz(o$m(&}(#*6U)-lmE%cI zzCl`p8u`3~u!No*&CM*rEmzL5bQa05oD%e88=C?3x+2dr<%ZvRZ{%Q~eRuVG>i&+m zT(g-+XU9zx`mu^@uVk{<|=J84*CMxX3k{e-0hQi|nq44-WwLU#m z%RjJO3%ZM_oJAK`4(EKWRS{0>ZAFPYPWGkDtK`$vseZVJ0^O;AWBVVh{1E$FAz|^;YQ&mzUJ>*V6>sLemc&6y# zP3-kDl@EV29EyL#(mTlFMuz-tn1N8>w4YH!&ONjAARzIg*JgdmDv!>PpJJK$`Qos^ zC-cN&b$1FjHMJ6CNM(bIsk}&D9`qd*%Jg34O7HkZY))KoSIyb`+qGf| z^b{NYb7MgrxAgD?l2j`(E|BRV<}wjA%;4eBNqO^Y5xBhy7lC<9Vq$&cd{9F)^Rpu+s z57Py+s{dv&`H2b}lf_{EyAYU(*mGZdzQ94{FNCLnhjnMMu7qTbA~874kcQ+BMW8j zvM9N?GQ+ziwI-IPUlYqG9Y3>A3FDz&GQi6pdxi*jw~G1vEGNc_S?#Ox&bkI%T-OSU zzZ*P;KOG|>iuyPtaDj4xf`ft!9zuOq*$|KNG?Y${N}Pgft?a*iylA~t?Yw<_{9v)x zNsMyL|NiYCH=o@8@u>0s>s_{I@1#$PZT)6lBIOreQhMv%xUo|AkndJ4%HiSJv*ynM zlR~zAV#qI}XZplry^p?Bk6gZ5e(U@-=96w@$`o7VIFg;xrJ7x`O!v3#!(jI~+u5P* zFIef)S5%T;DOrnf@OLQv>oUjeT&~pXg04=Jy1y;>i)<~&6~3Ho97eEj<8N=t1@?T7 z9hcZ@w)yk<;)$5;@NSUp@U}yJTjWW7n`0{3Mc5JTiJKE=lWM>Hbcb8cf|{$~LTBE? 
zj6##lK?(Qbg~yxszI1uIxkb%9agEZtcbg4~dMGy9&RHIJZiqiF2^RJSUb+0zBkHSczduFkY zGFHNCqG!O)XXym$sD-jJ$MyzU;SP|W*bngY&phQ9ezjv{iisR| zDI2O^s)HRrJ_awL1HIM`NdFMRutI&oio<+JrZg5vep)vUx`>X0t`ZmTi%IA85fyfK z9BIfl<^DRu@ae=sY93x%U`3=$ms(I7PeE?Dh)U;DHplmKN#lzsfR+LVvxLR_m_9J@ zCNo^@=m3LpK&SdYGdBymQ6w*aovHG{{LPB@JG#MS!#~G5F!)MoxCrrkcv%5!H8cyP zOazE@!9bo05XA$FRbAQZO+0#WkRo8T(L?MEfrT`%a7^b_l*$5a^D6OV(RjH5KWmGN z*p>#6H-GunZmUPmxSlBxJ2PTtq_7*scgGpmPzQ>=Nv5qc8T-iE8RyPW&|nYzw68p(fNNx#l$WWYd%4@CM2m&TXA2juc#TI_Km z3zS~SYZVv=lg1C+?RB9p-ruDxV9{{KVN5~hHO`=H8$c;r0-CD}29E#3ZQ+colm(Q$ zfKqh~P{x4)6bC|Trvq*#vcPSg|J`z%M5=KOPeh$Bg0AF`&ch+RaHrO)@TMY8#sj|`_DS&qNn9orhRMKi!U@%#)kC*P>M!jj7uFy z=ICKo))UUQrgN4;#m;C0pU2GCRuV%j-_uv0W4e+2$K`lCmz_3;v{B&{n}Sg5$hUU` z3VwcMQh966pp z9#-y{{>4;GHfdrS)@4yQZ>=|8%2YOPeXGhC)BfFJv3A7JyV%5Rb67xcdq2b1_apH` zDOzt|->B2k377ryA^u*QO-o)4Gt8usCA2U+5t(9P!NS4u%x&v0(y8^Yi%XJXlgRxd zs-Mz|D<6+pf9p<$n25T?#{2qAaYCc@W#;!Lg-#+SI?xEFpI92V&RF4Qne4Gzw~E^? zE!-}*w(G5{>rt0Ag17P-z#~x9XOxJTjp=-ayIIg z`g%gfW$Dc3R+|!yc|Nz6WuA9sZFlXOZFFGhf0Hd60&ctB!akvgzLO`jd-)ZZv8Pjb zN$eVN=BF}txyCl+PnJTzy-8^wbw1UfVSTSYscdGUKNr%PA5lxr4yii7P5qkd@Ur*; ze*G28>~iblu_)ulg`?Kzi%vw=y)RlnELI4JZ8h3-QYbaw3Cz`&6%adHBVK;RIs4(! 
z6)@mdlNDgPc8#Bj00UHYYg30itfWl5@B7N@EYmQgsqRq)O z+diY{>TbwgcInQLW97&L>OGB}+kgA9+&^$pS3pfMp=CB)Bub#24P# z7f1LaDT549-2hZVz~CV;XaWW={~5dl2E@IC#CMWdsnHqKfQ^GomJ?X20m}$rNelDv zl0;bbQ-N+}&;c4gu&56fKY&GDu$X_%s|N9NDUwKue?8?5MdO`OVdk6HRw;EW?d?f3 zU-Gsk<^n4t^?97TxQwH|vRt9ZdZg1ME8!Nr_K3tEoB?((yX$h42MP>qZkL2|iW)42 zQ0wVXVkIQc3%r^ zWq)dHGn{re2nSUv-G80ebWAL%whkH3<4sMvUftP1kgS%HCpdi&t)3S)8riXC8UJMQ z@yY!IrIm4`IG7g*GRD z+^C89=!|+gYOHELXK42Y=`uJ-RaX}zQazDvrEgjERW$4Av!`ye8y)K#>$}fF$wP_` z%Y)T6FO(3IAAU=?y)+W>jmtbW2jB5EGVM*jM(y<}T*hYUZw*E^SzIz|S-$u0b8-{^ zSf^>ZTUF=alU#4Izi5hruDGeKXg5#nSWitf#IH}S{3vp3ag3{&eYrYy(s5a~>9O|a zq_m2_mmLamUjMb|GCOHxV6H%N)UZKZREQvS9AR) z?Fh>gSq;|e_D`Zm)z;IY3S-B=H}bdT_Yy^2k%DdkJ#Njf7t(9rN~1=O$%SpBg`_N) zwjJH0t8yIU$^!=Jf*lI#3{tI9Ri+W;q3+Prb=k_|I%sb97J-&=H8n0Y*YCdo`q_ET z9;Ya&%1;*=vTcg&e$K2573KC3GuO5sN9jxHBz4^&;zu>pkNwICBMPxlJyC=G!4~B5 z)Ku~mNW{@nwLY@bAP}3rInp^Gt0lE35&xF5HIpFFoWI~89<2WOw|aWBuX8{}>-OG3 z!dr?*1h2gwkr-kY6q0T%*XPkBO2IUn&PP4`DyTH%D`_9lV zCa+^Do8q58(gQRGK#YKx0kH&P1H=x9QU~NX1{>?b2+V=FFo7piuk@RC^$I(#NeS3GbFXw9 zumw^FnFKB!cj|hTNq{*uSmXeUs$lUYSfqh{@f1VU4P27;Do3M3$a>qz zfE7Kk5&%|4pWZZ@-T5(72`n99{hk;^^5WcgybLd8(8;JP@q75;-wt{swp%}l7*|Ls z)M@^z{=S(IfFq#6rp1Jf3rw3F9e&!%4r+^XKeW+sy{EGp*+u(c7IEzK9{Po>*x6lD zb+0Q`g4}~vRR8^B0KMU-6`sZN>xEv%d?wE7*v6o>W-hcgu>`2uAV&S{vvHbr3$$rZ&WD7|`vbVAc*(Iaw zEqlAeEo76Cb!Ur^?44cq-h18l-rV=~J3imvAJ^-8o#%NzU$57BUFRy-BOQ;kk#j;~ z_2cdR!=_8boEbFmcm>v^GV7kAzaIDO^e@I-|CoPW<3!4nb5zPi4Xi5qNIETK_<6ijnN2nJHKSoRvKXBPW*!658i@tVIqM!%wSQ`w*4;&p6i9ODD%bXKGC6w>P`>P-t zP_yjTY+TmDNIV4^HiYNyb-R79Jgr-`nYuW8>wT=%!WX5Odc@v%HQs|_f9}JP4{E*~ z&|&G3XBaQqw7L))Xm}b06#ch?N>Yv&jj4M!kKnw!f~xJlb%=m7ed`bdXY1A>SwZzE z#!JTB;hjemkph%Fz#y{5spCFJ3B3-wB6=6y@_O9G-rRvB#*!t3jj^SP*NoibR*G0u?^JA#q$H%988%⋘#Mm;Kkwy^YBCz3>M#)vEaIA| z%yg0CNaf!X`RVBG&s`gBn*_fG=gF)~U>?yP2c&owcQ2)xaW`9V| zqx^E&(N^HqDx)4<606aC`_Q01;lE&m=DpNPO37W}IEft}IQ#nL;U|RCt0_52eaQc6 zvS2WIa8mml#?`{f{!)qwipQ}s28ksu*M)OmPlm=fX?nook}){wC0Cg|$bl^ox{N&g_m$!`WO`DN&LI9;DZTcMO^)s 
z5*UPjDIGL0^8XKm@an!&=R;k?H|phN^M`rSI?z1FI?Tl_ejm!sN5`utFgGf~#K<*ee@O^N8vmBj0 z-Pf~gh&r}^3q2@CH*1;q+*9){We*#Pz8apv_AOlHMJlK2@Ew1^=iJqPHO&dxhxnNu z1+8A^b6*KMKUS=~l6t}~`sfvc`y84z&?zYY9suH*#1|R$V*XeJFpP`aqo#+I`{991W?>Z0~1B zt0i4F&(A=drBKr0G^{jy$8W}*);gxnZsrPR1nvJ{S9KC>gZRgqOPf5bt}3AUa&&%3 z16o0TsQIDO%;q$IarXofY(?Ln#mJ^F*3)mhir`|^&!m#C$yhwN_ngS~A_*ln zk~nL0GkkSEavr9Z!2BL6ghhKCBdE@?K}Ub0J?aXaecqXm*gw$BHJ+eU^iFT~tMF(NC9Kex%u2nA@GZ8Gv+}p!kd{b^n=1)-!8=>*hbZFo@1o zraaVhh${$oIvf>fhl#}nR{CQGmO5^K8km$Cw^;7_qg;3+{1+GT@u}r>$?B8yhkhAo z)V17+QuL|uycXo+?FIJHfYm8fX}&w3=j?43#J>T)*^pxp<9sTg{lDl>Il1T=S=lbx zlfO)t3tA44r7&v2N=Ve)#Brn*IzEAx8bwAD)1VPesQ97D%fRfD&F?Ue{d>nS@19fG z=aJ^{Hyl4YJ7w2j+Z9n_B1&Kn|1aR-3=>uM@nG6fmZ_==~>GsU>%Sg%=sRQcFDC13PI zTF9f3v`3G+;>ZrY#q>uOj!($N7z@G98?MrRQx{ByL&s6x!b4))l3y zKhCip;;q)7X)dfak7VD{rT;G(MVO(Dr|o0yV1GV2`UnznN$u%UwMTsG+pQ`QsBHGs z9r>`QJ))N?WFtzL7gXB=Mrk`4DqsB1Hsk3AIg$cmni53$cn2|A7(q+czba_ zle^p>lSML^esMC+3iCpR*LkRA=bhIMZAp@3^ZN;%ygnB@ZmfmwN}Yaki(L;0Dk{KJ zwKZp$E&gHsSDNH_#@uV?p8L09Pr-jV(@VQ?R{s)*VkDhfn%86ewTLvUbugezYqr!S zJrm^XWqw9DWVCu8tt7`#o$FbKca){B*unUhOVncKo6BV0ceZ z)JyV~fZA91N>AXdR7^>Cng4x`$&pb!VrC^Ei;KWlQiij#zl^)b{O?Olj-2ArL)I{| zxL$lE8)-rdps>D?t_~M;50_7Nnf18EiX{LU>H-p-;wl!EZWS&sj$FNI$yfi4g~;>?Ug?Wy{EwkjghaCFRPA7@P<7G8#<&xW{LkeN^8OUD zp@=&Rfr(A@$=t!QPmYrA|JJc zqz=s2o(J(bzQOU8i~2nYbw8hK(Ubg$YrJ+q>O0mlvshHhfMZ;4Cv`E6b?($4^(grd zWB^ugSSoyQx&p&n82msGRn2Siyr#B}%vUSie=(I%Ho)=c+-gej@B8XewNrzSKv%$wX!4(Zs{im{uO*0Y zGEr~n_B8^i4;1uT|1e59VxcFhC>Pb$bZ}sz1NU#H5;5rZFGJH7dDcAxn(unwMccfQ zJecS!U-2Xe80fQ_)%CV|*;?R%YvB3<)p4&w=o9DOPu%JOUwy3EbBda_;?u=%lq47) zvwRe@*2DKi8C=l2vz?2IZ~5-`0FcDx1yO+>_f|G2P@ zwxeCvp5SC``;r~ZMn4ZgPCfpFl<>t8Ux)}_yqpkQ^u)n+#NiR%74^(4MFvqZ)X+94 zws8p?4Mm0j(J@~SpenkQ@H~9Q(xUQJKNOWzL;0Ukw?Ccd4|>%W6=MQCo?{iZgWoe1 zi)#^)P7O!Lvwt{cbumhXA5wX;6%%@{ebl2XRZj3(+*%iDF)XLY_9!}`p55Iv+*xQ_ z6RznZAgn>AS1uaaCI44%Id*-9Tgto1evq;|9Wb#afzjYTM@?6% zM!PDK$MIRbYs;H?pv?QOSJdS_xeQZDsu*OlTr1jMrs74Om)CBkyl!S#x&^!_B?b>}&V 
z{z>Mq%gR0x9U2LqybCu1P_eLLkdxnEuZjpxVtGXoSx1O>-Mu~2L1@h)2zHDQpdLW| zOb-faV`LgABjXecg82YEG7VHO&jpIe#sk&!I{kIAvttKw959aPu1hF^L34s()3YNw zzH#Xc)6OKlmX-fNhii)dy6l7?0e@L=Rpe~thu>oSItcOa_Gpc-WMDF1OC1KrIO@`?wv5DR}^X-JVUN8pf}--AYB{=VDN)UJ};&L-Vx*YmL%lM4AY(g zutV-g148%h5fJArZ^#phQ^Lf%bW7Xg3heqwfy!MJ#Lae5J&?Lxznj}HGtPGl1YS}0 zkHf{g1jj)X3k5*?@k`<&&lVIJj&(#g^BXkTzVVBAR~72_1K(?~{;D>xyWIO_GED0q z#v&*8$Ov1`t!|IlSae#v9tAp>%c_XGG|2rl0c@U*aq64bVDau7tVeWWzEoaO^lw4F zs6D+P#%qwB-Z)acOYH=NV`m3z(AwSmgxCub@9Ni*qjB6WMbM*QvZ z8l#$v#JB%P|5Y1+x)3o)*?gu|k)c`yp{IrMstB(N(^1-n<{dA!UVRc^@+OH&o_-c* zf5O9cXP_kp*-qyJ#A)E=Xg3H4W;WFyV}OqsFzZQynVh`pZ!B>Pvzr>Ex-uwm7 z(^+g_;%I@%LTGLh%RrbpE~wr7_M8ak zbYPA&{<@e1>jTqqdtQKYFsP&(tiHV)3}xM3tcqy(-S<*+^#jYVvscKVm{SKq7>>b0 z2c))xeEh=2*}VtkGAN4NZw(P(6bX^G*kXe$0!2&%VU@lh;`9`VD9UzgJixGy8|0fq z0n<1G`Zquhn&rU*)&*T_ED}*N51<%*CNLT0#{AghF_^L6NP80sWad#RzsDkglT#~h zuu&)~zMR&4$&-CggH-R2`Y-txcSC9J^FG|z3ZcI5r2YEdVA*SJ`2E+iFQD_;YQg{} zRnu1J-C2?ZUn-lJ*l~X?a_)Rxvi98d%*A=*Hds1*z8l7Ge zDovc}D=>?9uSMSe=<`?8y?f?z^c^uZ@Gr<72re3dfr|qyrmy(`J$MO*AEJLF;zQVh zQRp?7VvpWIKirR|Y4cZO*zE#_;vX>Ez^K{v0wdlHOyOT(o|pi#7{C}117J9ROR0?p z$v;S3i-@?917pt%j34Q>h}SaQq5(k$Wc_rv=6M++z8nRrnmXh0T+mEMrP&SHH%M&? z1L_ecD2k&VOlnR1Ld3fu`j9AVJ}?|%d@sd5K;7c6mLUQZ;ze5j?GG??g5gLS7z*7Q zbqhc#ok5%3DZwEay(I zb)A(X;*WQ5_odKJ04Hn!gs^4wPB@=vZ1q?3cRpijX!a)pGe6A0vq!%IxjtI~(@93? 
z1#KpRh^i7mGT;eTiLeJ7!vAdM@jOoq$bz#ELL;CQP(&*qAW|*ZwOyy~e~@BtK9RjY zzYx~}*j^mhBDKGyK!SwZ4Kscw*8tfV1FvctNOo`AF#`jwZQeVj+iz|H`^--4R=2_i zIH`;v1LOmHw7>5e3)1NBVazYo#sTl_4XAeL9^i=0f%5h{kIB!Ed{lv3UkCP^2MRoo z`MFT=0InA~=AlJ8uxnwdK)B=&Gz!}wEn{=H%50AhB}j2D*$!O9E3gY-bumCxN(Blo z2OT*=7F!;n(YGB5*a8IyaRDWsI%r1o$u0X56Fjzy+uFv!-;1lI3zh(M;Q{IyOHfRR z1z?gafGsci9Bd+A{&2uF1bgxF8m-_7h13C!>FxVwddu#z1+_%O>Y~ooZ@})angN*9 zE_ibgz2JEwyusu4)dwDQ^B6>+N&rI(2H@%O0kZ=-a^xtv>j|AZfPgLupipEgNIw7r z8wR}+ozegy#deUhxh%lU9Ze0?_BfU!Ts(|h{rl%k!JBpdU;N}it)4-ze}CgJ)0xX& z2F2}|*h~t!H)cz;ulJ(`FlP{`wEA;(5Ti)PQVMCb+m_j2jKaPa@fgts8Ue}o06cyH z+{z47DWw0*(_2>$WGdwYPtIrV0g!IHnE)0HG~yhW3nUSW0IGRK3t(!v0oZ8__>RsN zc>6+r@gQF`$lN$Z0hFnrDYex#EAI1--?{*(gA3^6G$8$gKbS^ogKYNGWk5Y+8C&}* zWX*tNVg*{rcEwF??7qC0w|ovoM)#zr8T=Ri$Iupae(*hR8&^8tgY{c#Xt=o(N+fdlH?&*KpqWK|B3CSflu3v4-EzzGuZoW}_Qta19svLXDz- zFITFT{32%1$?r$QtSm=GeoGfmJk>2ecmFc|r?_}}@yEPPTWQ0XyhoJ=?MzgWs%BAv z>apus{zSA5-!4w2m$Z4d+?7MN`6*+BF8$o-xHCyx^_EcK{sb3MGvk8hOvBqDt828D zO@02mf?Y(d%I>dnnLIedrf3L5h90sPZC1)`#n7JRYF z7JMkTj_mMIGkatn5z)_M@xaiiG|$~oWH8isw8TA?+{zVk9&5uOAstCm|8{psKzes- zSIWjUJH_Tz`~#cb{roW_ScJ9@;;mV`|B%k^v323ldF3OU=A>(C6l20@4c_zvtabll z;xwbN*N<*K+|`^Z4Dxv(-!Sz~)e4ozo3rWUX4T==IVk1kHo4T%dB9`*T4CoEpL|o3 zGX3f7LpN9d0N(X^ZMU7c`|>7gj5?~RHmpQ;`BCBYHWBBgg^#Y~s5!>e3Mg#uYOvqv zcPOGb3{B^)pM#`0eEY&Q+%UZ)4x(axYLHZOUkU;tuCcgYLa?VfhaaqcO@=>HR)M0GmyCKP~ zigrtzQti|kXqlh44jpiIZXG7zJY5D3D{$;?oyWJFTjwb+Dj#n@=tD&Na{#@f7=b-G zlBDh$frYw>qe7Bz9Y5eu{{>F)(};H2+X$;bsCXOU)_HZC<<@yEZ^EID^)T<~gwcdk z9b)dhb+~}De(Uf62lp5_yuf*M>+mUvoA85y?(IMjIPS+4j3&Y?Wpwy?$0rEB<+{K_ zaTHR^$*{#Y1AY9vrsOpSxPJooJ0PdcpAncMndepXlM>Fa0r^mJhg^V)sLCNxi5`b%&vb3w)HlhKsf}IKWNmU+O@BJgg*3lf(}y8 zoYLFxOfl1AIa>JbET6;HSJhNiw4I-8bx%V$IGL;k>Wj@E)S!uNn5vTc5zpE{P1%)W zz#B-6m48qN+BL36qvAT(EE4xlo3~KLnQY?_fzud_a|FGRus3})IN)#NzS80SzR2Xq z;Yr=n^n$@Pm)~KKmoz%)gtq*dr$w^URlNc2=w&s>?2ttWsh->5@Cp?A^&iJb-0z-H zsAZ{zZL3)W|Knlv-Am)X!#3R6GDAmG6rO3c?_TlfnW1TsmZOKX;lO#H8_b2X;JneS 
zs9B2tdrOI2)#n*q>PSJZXEajcV^+VPc36p87A(b2%~%PFwpz_MM-FA#Vr83OKanr` zjEor9@#pvD9M$uKooS+oZ%od_w5NtGq8%LSS2{kqr;@yxF7;U${xD{n7)6LEzfkWh z$7WryB=GJsA&jb^VGiPp80?IX{PkU!Wyd#=Z0KVajnlK%P0$3+2a===wdql|=RjSc0m0coygUU(H!YU$kkveYBtwkZcG^8Eh^S&2dn1 zv#OUG%d~t_*k&ul{~6}2P49Ho!0X&ozf~Y%Oq0zhW*0Ga^O@J(KQ^|;ra1GF1ApB5 z^r+KZ(u0*Q@ytg;eS(?~sR}8?lL~)CvKRbNC?BmjF0=6-l6G5esj`vx|DIA?o)lY| zlbF&tKk!!x^|tcq+4Dz4g&$t2iCDZM5H^1mIc2|^az-;EivF%v-fY)faMmO$V{F`Tk-HLC|g-){|R&= z6E}U&nUY!h>B*w*6vxf-)a8E}n-XJQ_2aoSC;eMDYF&b@9ihoBH?ng%k7c8U73eXA zgj`c*XJ^s!^$G-7Voee29ygegr^q%A?%+g6%zI-S3PNM zA?u$Pf9k!o_@AZYdZ>}GaNAVsP!<*O-p8}g7r;LQK7-R)-j|KwAoYlL4xlWh4ttl@ z!R&u{$sXlI8RFUbmTsBG(kavtfc>{X*OC0}%1)oyvIWh@m~x?Z86ct(v6b;PV^wp0 z&2IFZ;a=m%gZOfx895$0@wFWyrv$*#11!ra3sOfBADP)TPq~}hiDWQg3MP_n1VgD3S>K z$fW#vOFN7sKaATy9V7WHPX6QBdc|MCmz-eAyJd=Y_-_Ba9p#j~6`rM|ZkQblKOAUT z(i)k$mUf6X2`n8IwE(lO6$GIK5t^RGb%F>DAcC#1kIQSb-0vcQJO{|uBq1+`8E;E) z0wsikFmhl%Jt7P^wVU1k=Nwj&7#ri%7UV2+$2PlIJ2;G73Bpe7o zq%~eLPVn-0>xGau6#E(c`iqT&i&4sVza3qwpegUbaSoNPL#=_W-1`tt?Ktd-7Nj?J z*mlpuE*`3vsvEsoIBQ9|X#T^C_!jucQIMed{BFlLwtJ>~xfjo4O?p_raE4cg1|8m0 zAFk-1k>Q9s8LKN{%#mxFHrlQdezT2!Ezl>PSw&g-i@xIvpZ>sV;XM|~5fkr>?~@0G zyyFTAGp&Z`w{aEJ5XXpz{r4-hG~k=SORrNzr5YvUbYFH-zMPp0MdQG%N% znDz$eL^%Vc*8fzK;<%C46yk_sP1=RbT!uy!6*0umTq<)b3X-$o=SM3Qz3D4TE~v_# zQ(jFqeWJeKlS%wl+5BEF&8?Nchq<2O4UfhT^Fzwh{MQV_L;DpEt|*5G&MV&H6e$TI zhSq;h{p|BQGqRuWg{+Q~)Q3y*;&q<*Kr1B9WHS!(EIbo(WkhZ}WMk2Y?@=s=F5g5c zZChY|+0gx&ssqn+g@rhV=o%M+XguyR4;D$ot)+}uc*whl1SB4%tm3n>0quNCOO&>) zZ5=ADZMoL1ZDDu0YS_hRnN&S%&QcCOwnQvG^buapKKQuHy0p}PAX&p9K5Gdk*ul_f z3?6d*nw-G_0YZh}39D{xo8uuM=?YhT9ugdq%1%I1tx3TUrB3Cd?^z=PL~Z;_OPw~Y zZEnNx5I(-Z?bbGd33y2OVl&o!c?ioMA&JKuz^AITpeJS6BUJLNk%>F_=zH~+3LYZ% z9YlLU&afu{D$5117l@}03Z7$*ah36`F$3^ja2WyE30x~|OG`HbOG_r6jCBvi)4*a4 z6y&Y$`qW6rZ{Wv{IB}sv z;0YU0Fg9-8h4JT`(!~poCPU;LDxA<=Ct2HZ0`=k&?^sHoi^R9ROV!LMrL zr&AlRAKQ!IvOHu;8Ge)uF1w;%Xzh@Ogf)-o(san{p+ha-`T%MPg9f6|Hq7N1>djxD zg!y%;B*lgQyNaN+*%ofeZcLp311NtRY4hFr9?NOGHd!hPhG?7biL? 
z0?Ne1EV(LO<3{gG82H$f=hXPhBC#F!J`mw$hDy?;3a}NZMC$ z@bL0vKGr|Y!`^FXeB^`n+AhstM6>w4r_ZR6`Yy9AFg7kRc1{hYe`=hx5Tp0y6-j2v zW%F7H)NjAfCNjjeuOePjYH-qUx>jIME;%T*+*=|wIK+-kOx0EMI zDBgv{Jp3l@%Y9%WITP+~pwfxPflT8cMRPWuKmwPHlnh3#5MJFL#LaWrOcS+Bg9Dpw zEG&kywhc^pO(L54RMK~UQOUe`2jyJQGtSTMPQdL*!ng5|#loEC} z-sG1v&Yr#FpD?ZD&sZrv>mD!hMS|6u2DzTcd{$V+Qk(DBvPd+E?qQtr};UmT$Nk zcR7_0oxLz0Zgj!c5f_ZF1EH3Z!)E6ds7T}*!hx-p7ABlVZGJXw za&aJ$nk-e>nUG5^>a|;ZKLK0()WhLq8uEA&(X5Pp;_)h%VGx?vyFS14mqU|$OG_2_ zv2z5mT{4nW(yTATE?1!ORsFY!s~bZ!i9fW9R_G9SiTO^u0rp~C=*;I8oJD79D6ufYtdKl}L~ zcqlUr$QM1je=4M@6>XlE4-k?ypGfX{ZW~R4)?wb|?XBc?;7CC!pp_gezM>O-h8T zTtH2sw87g93ne|OP|myUI=IWqSWcAzHOtZlZcHWO6Yp}0NMs2Y>q`pmKhspf$P##_ z8IF;qgE92{lNG&m1u2t00Vx+w==dGkaLlYRj3MVwRua+`Oh)qw`edYBVM!jjSmjc_(}2K-AAN7~0a-8a8yDh}DU|yD0w;-Lc9soW#TS zE2j`_V%=2iW9L#>bDJL0y53pvD_6hrFRfk+ zsV=nYs&cJ{JlE4)eNTz6!v<~yu6vES68eb}#9<9HnwN`)DHD6UXZkZ|%GJT&cH(B- zB$;&LlHb0Wbu(6g+iUqIf7o)UPmY#F;)J@iktOw$C>V$;&^c|2JGJM!bO3-#L0{BT zI!#Dk)C0gbxA0#8vjAQa0DPSR&<%j1&>E71eJ6jBOzx zyyXVI;aVhs5iHz>SOBIMULLMo3{{y30ssX73~zy@zj6~HEL4UpQE%`%{0eJy=>j&q zov^cP-LW(*hKG)Xe2*X*XeF+bJ;4Ms#?`2OEj^otkVC?599g{uD5tuJj{63QoS zt?`vAS09EwH}w|W&S>{T8gq0)4eWnRE@2jHZuhwlh^FQiowgN1`9)^>v+YKv`V`Jp zhEFTW#cZpI?Cip*R~{Kw=f%XSr@8!aYfSzcMjI-UjpZmzHCN@yO_RSOONYwAL5vLib_`_;ATWGkvp>ob7yblxGoK}t=&ui z+Sy>-opcHl`uGb?qR5Dlc_V+tNT+53^FlA^KtE;b%?Gk|w8j2dT=tl#T^%HpN<7b)`Dn z8Tvm*uPtKrp%wnz7MRx9=NZbaEZtFT!_hBa#^j0%s_F{vuN8;uYjnA3a29@Pt;}jd zX;_ydlc(Lg@RaNO>c`HuGxS7%K6x$1ZSdnl-7piE75OBRrK@sCJ#s5t74sdt9ZY2iUsrAIiAYGX8`;HeX=ZPd~u_Q$@Ettu*G&T6Z9+s$_N z$UF}-m7$urw_lK>hw=>`QT>dlFkc4^V}`Zr#=?wf&K}10;FK1E|U!tHZY8~^cz<@j5iZPjv*W}Ec# zb=B=zOUAPP)Y^Vg*ro4wq($(o1bQfhRUmS0kp@G~aTQCBYQPhxj)8(Antj}ryJ~se zoyDHEjBGbG=s~4Z)PJM>BFP%>kniPp(*fW`Y%#hAfSFqW5%j?6Q_zD$a5)8lJ0`vw zMhhWRO^f0UMj(sy0XBXKMhn$qN1Y~xCuCyyJGdUBa=_siip&^O)&)vzK4-1 zA-zFDN`nn2#>|wXd#M!tX^@r4l>p!V&P&9n!N*LlUu5{L?~{6WD5l`R9eCs1F*A*& zt*A*wap8%WnU;2hN+ZcP)deM9Lk=NKH?42Qo?N`#Eg&sTJ%ACc4D*fT=Oo2Kg#X>W z%i#s(+oC<*UP(E2k@S?C)}uSGZ2J@RQRU@y+vf 
z%=GH?a<{2{vOzaLKbO_CQaox=5w{(8J^mQ zh0AL?YjGi#xlh{FNLuuhLyW$pQc)EaF8aDR1zLJXhgCbeIE*+tu6U%7dy}?Mc%Lk> zXL;>Y-xz2L?bVg&9)3yu5VhO6OgydEAJ&~{+dmhV!1Y7tnJDKw&27@CRYE!8IHBIw z*vWfKisLE4EM70kqRe973X>a=8@=2Hez2-=EWwT6P#m1%fenRPj$FXUhh&0FvYtQO z2#QsN$&73aUvg5Y!ISip2B#;(?ACI}*(yZcACeqtZj2|8IU9px?FmgYUGGV z75F`95mQf;bnp8}%de-<9M(}*l9=n!mGKI!IjuJ$=ZBJcC-mn}+;KDb*_USp*1ucW zu`T7#U%76h5tM1R*`;Cakif%{>q+5T%Dl7f=dV|EvLTFr;P)&Em*?#OJ3z$0Vr*2d zP1tZ$2flhe{YxmM=+86g`jI)SDz_Lq1Q+k?A}rJvt@rk*+4FEr8GIAbT0DJTHGcWh zZ~qiWh16SWw{C>OB|)wEWSX42;t7RIyq~b#2G+y8fsdnn1zU8%^p>$t&e@)vMbe}& z5pA=W9JtY`vvrX~ONdp~%4?q!_-QjmMRNWfIxLg>ZtAL!&5N|jw>+%Xi89UWPxO&P zzFT>X{Qi=CP&f}9KE^RCMQCRoCjFgP8>0dSEZ%8on2#iJuUqZXguE zTyo{O)(53BNQrdTRrVWg`>4>^F2)}~B55r-mL%*_f9_}4+-IjA5<=(M{P`xk+r_Kf zf^3YquwzF!t{*pU;7r_GB1gk4;@CW#HQ$0y^Fi=c-T^J2iwjpbR*!p`R!ZL7zurvB zP;`l(W3>)2m6t52wyMMr;q`O)+At?_>ZlfRbOrsXM=TN3M>WWIK zY1$U2j6ql`CC~%PjjjKEyZkn-Ul01`5-vGyTwX9;q~5NJm~OGtcOZU-nw&B)RkYFQ zOKu9TY>mD@H=gO9i5RGO*o=z4SLL<8lvPzKz++df6m1Vp4u%r<%!P~Ec-+52!n<%; z?W6IvA5r3*{-VOgOMm>Ib;3Jp?|0#ozrN5uZ1rc*zvsUy{h^)Rqs!g9>)z01!d97K zMw3+5c96uEhe`MEyQfNURr!q9;va7i$!TDr2z^}l-;DBobosDfx)2sQC-y9B;2oC9 zbyEX-*22|H_*Ll-ZE6psN*R`gTNrh+dxheI1ViBhW8I;}zgoeEyNuDQR$H{ZIa?W7 znZtiuGug#WL;NbmO)f&t+oL8mI89awtFkoI&z{gV9=Yx$ca%TuT)(1Z%ahb9AfD0w zIx1A~(Ev^+)-(qXUB#{ZqS-KUFYjhv%)K>wUm=HK_I_#}Nt7aNcOWZlfagi?U&5$9 zgF|K6@m-29zw_xhdmo&?|7jko-ky`3R~Fxfh)5px{x~%1ef^&cCs$M-s!z;hdPwg} z-u}Lk@JgeD9AEYRjKf#8>JTmQLqjO~2P|>tWX? 
zuexceF=1mWPDR}-9TjsDx~qB=uN|q1D9<0zrGFgZx*Hd*+esX)Pvajpjw>6El{Lnz z*@KmJiaB&A!0P{hv9MGsGU>CCa#4hi6UY+cWF_1g(hRUF_Qn0*=SqN8t#pMElm0W( zV(QSuUd7@Ep_D|jMYvh!SVMIIR!hP(l^!^^{ z&t)3`QSgKb#C4~SEem3C|L z%%A^zk%kbH>W}MpZtohiXA-WieNt$5^nVD=8ksqq^v-k>^HuRM|w;WZ$FuW`Ez%s!GSTY)bdr<-ui^3ug}D0DBm> zz1INbuR03ZOs-TGZqtZCC(*u#N!C7XuEa z*zHZZc%A4j{H>H?Y`ms|&-@oMB*jk6G*=_uY_n}zgwUTp@>3)+%U*Mw z{T)b@tlv`hGxnLB-`_0Pgv=9moz5r5ac3`M%oj)bQl3|Z_`PnmoL+Pvm|BrB8YB13 zek$YOBTMUyF7Vs9EFS(D9-#3=a~{#qH_H8S^2BYWH!~}cUJ$31z&edWLP2GAIYSm z=hPD)AUdu2j2Uddo&IhXybh)~6CoKLT%SMvWIkV^HGT7eA#u5Sz5bs$>igh%5&nGC zyAnFha4&kltPbol$)O=3e%oE6%{cR(`5PVCJ?f0WJKYCA1+s`pM==8>rC*1-%*(QH zv&(8PW8)8n^$?Kd)})aReF^y$rXL~8!_6XlZ{_HPEFZ)4qe5Qtxta)4ji?@cvfW!J z8aSw1Ck8k~wZMr54*jhY2OO?jCmuKwmwDt_kCR9qraWi3U)>T|5+VDR$*RXGyr73T zcmrIIdx?Xsz*PXQ4RAf~17mO%fNKL>kBw$CKW**24&4LS5uIgvbs72TRA$2yIx<#i z;d@J=OgKBQEeI8`mEMQE{QZYGI46MqzkgU__u}x~AG1n$8%!Yhh`3 zY1fzt*Y7*$mVCnX-c&y88aNbS-TS=qFWjg}$CBY0 zl>Pg+veN~WvI1o?vmG@vx0GB%Jt}CAI@9{C#7e}*ef9zVCtgy;o>h1CBw#l$&kLUo z9bA3S(_H&Y&~|-n(6}YWHEP2-KUU;e@IB>`<1np6xt{&a8}r70Uyf}?#O`)j9wph7 zJ2^h-!)wm0+BgrMf9{n!U6`)wNS~2?rR(+byy$ty!F65f@cPWeXw|+dsozPysT`wU zr%D~vY9{dhB3}NzGmpUIEmZ;GX>}s&Dm5be&D_tmT)J(R#oBG^+yW1qGE7~s9`|hA ztiQ?bA;gL-QBRKidp|(FS}cWjPLD*pZ$&(!V8=Lu0hRJYqCQF}Yo@k{R|752Qz~P# zZy^vdztCKKcDPk==H(GIeeM%K?YN>@35l_f)g-7tz!Kciz*DR3dA&ECMf_#p8E$^5 z?n=-~1iU3RlDYCGGV=w*`#EE~l(z&o6L z(MK!aSyv*z4x6BPdmiz+e1iMA^Y6NXM4TU;IW?VItA~CO9J`Qq{u$!?x;HFW&6cr9 z?fSBTx2?Wd)O0_ov)6Qf>u1vZGmB;qgLD^2-*ivKsIy^KuJcBxD`LCA_aOu1ii)?u zNpkHhHH20>r=OZJ>?XSGj=Bd}HP7B+ z0LbkOa{mP&D*%&jb2}>}Ui?#}TZ?*n_%*hs8%%M6sq)(ZfLqf803HCe0f4~;0KEX< z1Hj1aeCWh%Y%yCD){O;gl#4>5XTPU39o|Jh{j~kE@E@POK}l9H2O44LL^lZ59!1`T zDX~b*WqrrPO5sa8eno&-MGnbq<|%0xX8QXZT{^szK&KF4;tgC6tBJ6cPyV1!{BRlC zO?(fl@^&X_^%@yk_9(2nR{H6)W#R8UbfS$NPSTdtO13pDD&B21dUSZMpjkXXivAdK z<`1$mpBdIJ3p)r%)HN_@g~C2%GiVjSzE2b{LeI2rV-J2(Q%Ug0^>wJgVwsF6yuc*1X=|@Y+NyF!vyeS%c z1<5Z<)xT)uQ_!8QM%r(2RZ z$#Qe~DHa;%n-yLD 
zT|;g6Y9lbT7NgA`fQ+fhgo9I$*6&o5ht*<4+5t#M-r%X=BiEL81B|vu&vzN0FE8^B zquTnK_HnpKk>XGsibHWPF2#yNai_Rju>ysZ z;%)_s7ccHE!QF}lr?^9L2qbyW=eghCTHik=Gkf;THP=iCCuf}vmK=s@tf_wt(=bUh z0k88rMz7xGM3cAEuiyTY@hd@kMsTOuz4{A_*~@Uef)8G)mXZSwXy_lqTuhuW93?{m z67J3ox``9>dBl5uyUB@(jO=V3W+=^TdauXYeF3h{FFH6s*hV`o9>3(p<@XxTp0`^NE@n@sS&?^3+n zu9@-iE_rf!UR7!KTOO8Vc#J~*G!vQh>{T~m#4F!=$v-+Jc`!xx=kQ5 zdyMG)u5~a5eYYfiq$)T5`vF65gPZT%dem=9VraK3Mc0%_|MRuxb20xADaW1D{lDdd z3a{!(rKoDvw-AgbR0T&Igj>iM=fYLzzg!R}eH$G11OqE+dkz)<&_!@Z`hR1BfIKG= z?BfsSgO~l^kCsXvz~WUpJBZv#Nf%%7PlVo}x)PLQxTRV|ShT?$od#Z}xG`RQrz{@1 zxB{L_c4fp%Z%F^t{VW4TydFI+^e89jqQqUNA< zD=I7;#uNH^Us6S!524UM(HOTufP4K54UST%KaG5-zqx_{8}99Vt=w;f9zi}FT|t14 z;MgrGd_Qc{8^c@v0kQmG9)5y&`7*5%K}xuNj7Yb6t~r132q$wES~mct2G4x{Jl%j) z07rny5v0F6OXhYvu+8Qi+{5V>y`uU1rdY@ba9GL-(EDT6!T0)NZZ&pZ%bOq1+9h@3 zzAhB`7@O#y!T%~PWPjF+GcuSwB!Cu@*PHa_ydh>M4rmn<{j{O0BB|&M3*<_-@h7S0 zC~939)UQ>WHZe1GbU(TaBYr^2g4()G{F>P!bxQ!V z6P34|RK}c0Wphi*%Ce>r0;7|Z@S1T|ofc~_HD*b#5-JBl_Rl+X> z+q?8JM*cBx6ty56IOI0U`|Af$W@)C(M=7Hm7Xa6va&hwargT6$; zL7J@<2=lZ{aOTc1xkPys95IH1FhPKqy8d44uddvzI=eO;Onz$?%e^6R678;z7Nn-p zAIywLe0>Ht8tX;1Tf5G~+MVN4u@cI?;*8Be$ct51gT4jE3ad@rnlk{92SLt+u{x0oOZ_elgB_ag%M_0Sb} zdyt`Q1BV>#*n%O8D=So(q?d>@DAE~Z{byef-O~_h~XxzK?9sy z21~ZqX~fB#)I~(Z9ebAx-tY#5-#-L~O`BK4e78P6UZJy^#kRKHv84nk@7n(NpdsK( zT$5Qy#6Kbj7x2dOjlL6nedLxPKPdMoEmKqh@+R_RIzM-%hY5K3j&bXICAkucaeLI6 z&`c*2D7Eo{a!XU=OfD4KaTle@UH&hBGE$S~Bcn{1z020|Kly5i&7{Q zS_gcBsHOm6usUsgYD~t=C9=naG?ftkFz&#mr+DE-&n1WPM020-)|c=L zhU>{b*Lc2+xwZkMK+c4i7VHi`QBvxnnnC$7l_0@L=e9NrgGvNHKZ}^Rt(K4D@;@nI zj^6{%oA?kI!e$g`2chro9AGj9a*q(^GFNb&Y2R!*zvMRiR9RrPs*NwQw{=3t2WsU= zdoc>T`^@}3MEx#Rb}_Z#VUyd}Ukf_V7lU};%e08V!ScBTb3Ag31+}L9UAlV~XZ4~x zYm^$MZhX;O`xd?5br;XWyS@Te2P$eTKH0pz0@?e&;(qZ{>)kURZdV@^RQ)Ig@L{hS z-s@s4P5*4Z?Gd8g<(< zSm=7$tkDyTbNrF}0HHn0a*$^DjE9NcCi9HvbqUTMK-k2ejcF9c3N21`%NhzvYv-MI z8=7y9LB5RncIR&H|42ggV~zc3j)^TS6??m>?O`*9U>9)=^&5$|jxNsWDN}&}ob(iPw)dI_(!GF9` zJI=3r#2g#e+>0Mq?i6AyR)3_GMLAYP68WE0bC-4F7`+XOOP~k><1MTz8!D`>r+V{( 
zUtDATe=ql>;!dc8>!sJv-*L6Qa?LfpBf0iT|H6?cxO-qPf0}=L!s7Wa>QfNXId{)9 zveE@XgZY^V+)1K+!AG#<0Jlp&Y2zW<%Y&-?Mev-tW85&t`jA1e93v-mW}|Npc2 z1(N@F7XLWNRBg?kaWe2dIV3QHH)w*5sm-A|iBLo><8Paenv1xJl0;^4NzO%O5h;!i z%Nyq+>s|En3^9hAN~Wgy(_5UI?D;(1@d8h1(R;zzQC9OqZDe(FZ7XO0YKI!$s-5>X znJ^nGg(gTUIh4X`|8h9h@MS~XatG;K&&5i;1Ies{i7XM79B^lgOnA++xMq*+w&fe* z==6WBxw!u%;?K(CkGsR96Ok}vUc7%r8;OJ}@{JY)pV5M`z&&-X*|OIW9}Q!chtT62 zEgnAV_5g8GB$2)xBkjll>=XU#59;*9_lr>mYh}!zk=$CB2nZ4yoFH#~Q2DN;2?&zW zqa%NaYswAm2Gi5C;JqezFH)z#xHmvSkV@nvr>9;?VBz;GQgIwbUe4srTl%Z37X$=9 z(IX=VV&$yQ`4kHd@i8V*Bhfia zM^fS=qlb8LNy7ac~}oBm^$_vzvb=yQZxfwei3{Yg1BQ#m(Ahrfp9Zx2ep zd^8ktmFv5u&MDm~eejX}neS&I`wrQos56^~niKy6_Q1z$wr#K7&HY3JN}WpS zO#zR$=mor_4Xmoo-6y$a*_61`8U}}{pK0>a%!~>_kbTgX zFQ1|wLDiQCzCF;T$DL99ng8^f>7RDNru$@w-IHTYp5yOMUfYzrp$+jp@H+Mp*jRFL z^Qx@4tEoNts@d4>=qw}Ww!qCt=_1zO6y!Ucw-R8g!FsVSWJ2igVe+}ddGH0^{VO@1 zI$RWXdDj<>va!WP%c`A|DH;BMHyFiXFXvo$+25o7YYH{E*}paVcT0A}j(jOdepK;! z!2{YJdy^Xcuo8Q-5c~j*^?LOMMks|RbTyBfb4LN-rwM+ zY_@19kvau(@-~^Lzc;;$Dv8J1`CYNG`8x4b!}LbLNz+7%Q@FlHut*l2v(Y2K$zb8w z=-^`G?P0y2lNrRj=Cxe4zi`cJU&O0+jhu8l^;~28)L;9zk?I_W3M z*Zg3eJ)0Q^9hmLhG^>(Nmr)t_;dZK`khFI&wTat5sT({G@W&dRbPULns8F)CU0!v` zmA~R!*|1o!Pfm69L6og4s((EVwo-6>?~#=Dccj`#F`^`GW=O5Ew%j#sYP0Jn#O9jW z3F57BAYX0dde;0exAR<^Q_u6Wx|vSjC#QtN_kI$xGL?TgL#{0;@c7n$V>3pWoc2vM zhz62uvO=Vy^WbT4nhWk5#J3$Z?7Cwz_1`m~x>scCmH$(<|5E}dpm2ki*eoW82s5=q zuz@!=i}@iUgd`u~B#@6VK9xbVD9Rx8SHs|*XLQ{>G6*{*8N>%*nLuJ5&Glmj;aUKj zdr1V9(jtP=I1oTLb%}tR68al(6!VRFWpqRg>kTMH_Yg7XCxb{g4ufx|u-t4~5<~L< z4XOn;eu?KJ==Aaty+DAK63558MPO~w#HFz;qxiD_jbn<$uy{hXZjPdDNui)?0k*<@Y0aCp-qqf3LQI@K})Am#3tbF=>QDwh_BUmkviZgNT`#!sKTm4 z;ZG!KtZBrC=EmktL}S2#11uzfg$x>=0gb@Y-6`zYtoxd+_3Lbu=cdqNXF=rx<#96T zw3y>)7;k?yLCRfA^y9yRLCJ>HySc!KGnNnWiRx+&pG4MeGVZPuhdICkW%)UnPa{G5 z(_}}^UD0U`=3VSCpXL;p@8+B6gY4q)gGc19EAGt-vy+fpg<nK=%nt*J!oWLdh}3 z;6ePsf^DGLY*s>)wy{7}K?@c6ujb@-_P5+i|IZWQ;>lbq$5kA)!IfFxwy36JBB8hN zhE+18I=3UF4{WeK=&Xf`2KknE@_c0UALW5O&p34!ov!QjD1~%9|3g#r1U~var!wDw 
z%CJAMjp%m2pqC7eH)je7bsu7LKXR`YidE`cIp*s7>po2!dJ7ECK9$xfIRBwl$xG1t z9e5qW2{ZdTeh4mVW^!FH^p-v*AMZK3*0Is#eEPLDO&Nj`)I97fTEzTQvI)<@V4BPl zG$?uV%rL|QqkHqn5evM0>Vi| z7~hfKYtcr&LNyGg_^b4lt!Dk#&LP{c!nxN{7nxSHgn$KQlwDh!18JT1a zHnrTcNX+KXlJlB!z9Yag4*3Uc&H|f}(-~^?;1TC&XaPRblyfq^yKTyVwDr4$%O|>5 z*~?yBMQYnU8EOYID~yCdO8%!wmby=jy??O}aJYWs@FK}*wo=L!^eOfshqk&i|3D4r z&u8R2HB+=->PlSt&k+GQM6)rOwzmGNZ}(?t64FkECS!Oo{|Jo)b`+#B2pOUX?G!+S zSbnnq?=8R*Z582ty>#O@d@kAnEDR(RnSpQkeEWD&Cr=H zkm!7*G2Y-8pojiLq6?D7pv9*jIa)Q_HY16o!~c#E`WBfkHr70bR}F&^pB^(*6PYee z8iN_X05jCR-j4PXRYp1(|08YaYYg6(GMd5ox_69>m9dP~(ij~0^jM)Y$aKxp7+m-T zSfT%r>AIycc<|{>RA1Bxzk=$IXU@v{)I>&{!V@GTr^e)AE(0lp4y!47VXq+gH8D@> z)kVYV@413#lsDCbTQQ5ZhkwOatM&3F?1Aba&j=B`_Ci=)?7L@P1{g;d>$pPE#R|WXV8* z6%{)(qxo$}3~BE5XJ^Pa-`2AV6q6+{pSg2!V@hDcK>`1QyajY#u0CWI2?sb6G$TuCSJhXW;$?p$i`_wJT*d{LGyG5!O=si&h`jay5ux zU$=mz--Gg&DO!q_B>tvWUog9`RNtnzd>_77Z{3isI(=a6K5Hcx4?2t6z@9Qxg^E5{ z4Z6L$_X;XKX=BfBJ~6x9_{Sb7MkwL`Xnvs;`M4>PA!4}md5jGnftLOcv1I|=3SIHR z^+!eEz6T7elwCsgNt@MAt9yYzD&$eC)1SXHD@p!Y2j4RkwEF4%`nHko#2Z6*jY{kw z&g(A8h9k>{S*{~E#cRERE^z{f2>mtxsyNv2kT^EO>12fL;jC&aD#?rV%k)i3W_*hF zwO3_!X6eq}>sDb~yfzv6dd5J%>^$7{5Iob^e*xF|*QC;D$+%r;4__DhKje>oQB}t7 zmM`DnK@_IcCzr%d=V-lqF6|DAg4WEVxL<;^ZxA%@`-nGEk2kR_uJ@3|_`rdBuCnku z>z2o~+kNd7ZM9^MN6CODoYeglP`huf*XQ~EG;QtY#yQh{!=N`;v3DwSs81QSHAVcO z@a1W}Clk@9g^Ih)sUc-B*st%iE8=PPk)_lKzQpAK;tCq$lAxe(D?V=-xz-lEms~h~ zqcq(3>P&Bi{q81-%@6Ey=}cbZ2Y<*~ujtE3hM-e(>c;!4Y)pgg%b@DMRYpS?0k?75 zjM|6*j$3sw)52z+jl(|&^RlhGlr&~pBSp0fN$bla2^B{l{lI|hYVBeY@`M#*w65wV zM`9}nQ!%6Ks=rQ`Fq(ANGt6T1wV8NFTUQ|p4`p~iB4yR$)bkkM=J*a(n3{EKUqI(A89v`u zD7P>fFP!ryob1P*JI)`n`X|6s;&PKTSV{NMzC@6Bt@>#DBdBVIg1WSh8iaJto;Gv* z!Rg24FbJ&V-`cl9>YLzeM@r=tEa)#@yV%}9T=vVPJaWvl;}cm5up{l)D867G#v)IM5XYe3L^jCO0veR;zG1vTXRp{-f10k zYBTG&xvD;Ng(v&>^(%_uSCw8!I;*Zp%8q_%9vS(1;~;SoEbDl8hU7nW)1o`xYHc%H zi*vl2i+8BqmtL-I++Di6IH^^ttMxjU(%maqvVVlfw)?uCZ=A$#V&kDpl4W}HXMje$ zq(+6s#I%`lw{k8qchKURwwGp&*7K=?Fe9I-#Gh!fGQ|puqqhvckamBA=n0^0O#uCs!l(EKnn9v% 
ze1Z6dE^Utte^2^t295Rx2_x=>3=5eO`?nb^T7eKstRpo16{OIh7u>20wA4s4D#Big zxwRkY)GQKY+L1$>U*vO3+hgO4qJ%E;ByYG^{}4dOSVxix_%=gBD}aGPjVu%MZH9qX zAWEXw{v$Hc3Tmiion8Cam2WL?gE6}-pAEmh@xsD5vox27z6=dV%C~YR%$Y+|oc=Z= zKB~adtQ8h>`-Qu{Bd!rk|@HTE$xz| z-0I}BytP)pLDEm%@3-ix$9||I^2C@pSp!U1ixJg+2;Wbd_LvBo-7!RZ`m?#juuoOo zEvbERGyJ6Eo()td$$Wb|N#$E(hokDR;jJhwu-IP2;;(*a{JCzN@^Qt|aN*?&XItel zIKZguP##kMwQOgj<8nTf>FLJ|Mwj6+z3(g9EXtP<@H8Ci#HuIYg7114_-M?q@d{yGoY-YNGP+g9K4nQdG}XPM3#A3m58tGM)T%Iha}ze;M(oxHf1d2hoPJPCcUy?Md5)g$H| z)A^Xnugwumf~kMG_FLFt*L|Y|Q!~s|`K1PlZ&;Lt`%_-_kwc*vcEVlK!u?^}8>FIs zedOx+_uVr}ucvtQDhp*;w1xYd|Jx`##XcH%W%tF5bV`%a+dWx+wmh0XdrPD)_f?L& zy3rpcJB|~g3wf}+-!R`XB0)u&yKA~N>Jda2%!SZDf8N|-f$Ywy|C1~weC~qM=s#Lj zLun9fh#lY(#B9fC!;+4PKnVff9Wsj7=M)HTH+aqE5Hx091x9lPLL?#rVEi;-H;~TC z@0ypbA{SF-nspET4TKpnu70F0iXJCPzeGZrB4)tMjY1N#g=7{ut-bX!}$NKP4NH)YFmEIE*baEA1T#dj~(w^nhsvoaJm|Mc_ z>_7c>dM@oe0(AjDe_zs89yB~);Lk!o28W;7agY2XIKFf&4!XW&$CWUx76UP0YEJWt z@A|3W;+ZNz$DdC-bP(o?h^6d-T!*75kE3M=n_mc|-h8HgM0oo-f?FHL-TgJpb2UIo z9q+7rURD(|J>;Q#ZV1O&f$5yL+) zm0G87+pWpLx!(?WBh1^z5rMg{w?YaW*vw#0_XxASgtl`8O#e}eo{7Q1acuYI)T1p$ z8{rK{k3fHE(hc|2{N)6L)bD~@b|R(|tvY&l1oP_Bj~^~#n;f$wvI1Amzx2$zHmEs( z&;5$~%UOQJW9hTOwMp5KM%$GC{uGz6zjBrkzQUET{zoR!=u!WMWZo1{Y*Qt{Uf3u~ z_^{L$+Lbly*O9(q15TLr?y4btEU)x3HZeSc|80pD*+XY?%rCl+^fN#D-(w$7Wz}hF z|C(3#tX;;HkN_Hx!o^Ov+(pBaS=WUS(QVMQ`s~w1yLOcEn;N{OAJy0sxC^QO_t*!G zJ%nHE?d2AovJB#QEV14DNB&umRyh2sg0Is@H}HWakDWn1Z{~yRa!KsUb-pCDE)e>2 zlKa@|z-@s@f4!z63hzY8(_C}NJFhf>ra@WBwfvYhY3>JRu4n4}P=D%tVU+U9|Ug(+W55uD;b3T;)9^ITRe3D~T zC?G~RDj>nXLH=$jJ%f!=j`UtgPq2UrzY*oTzw`_q#w+Cavb4WP6uoxYRexAB8X><| zqdmk#l@I>Kim&tXd$qJC3C7CjR{CS)_vW;3u~FlKfAN*RjhvF!%uKS!L2V+B+K|?y z!pKBWoBZ zDm_B_52V@G`EM0B|9R@pqnBD%46QK%3;P9hv(?5HA;jaPtib9Y>+}dtK0$<`b{tFn z_oo*@406?09nc-_Oe|$iM8X;(_9->S={xw7E7-NFt-oA%KOT!T16HcekIq<{i85SS z`%-PH{^bYtrZDU`_E;|B^9o%rjh7xVjOF@~_9H`ZoO6Qkczic}rWS2LYMv0_VwGg9 zk=4NoU0;LsYE10^2-*MrlEp>B(oE={3Or1*e(d3?I?dT^MojWEpvA!DB=DKnnC9NF zS+c{enTaGQ_?J#L6UH%qznOD)^H@qn_gj&M$AemHxqak;U&SQ&B-ntkU3XExq07jtpk 
z{%kzE=DqK+&e?ovMUs`0h1*;#SCYZe) zg{Ef?R&3vxJLKvn+Gll%LB#DQsjk=TA~Fspsho1bohgqs19Lm&soxO$A=^zsC@7fO z6l9SER{qi8vMfKYMvHge^YLz@e<%*#;#|u8Y#&c|1od*Lt{XM*cvnpH&2#c$X$wtq zsG61={%j|HXK`Hb+&8b*gsCkw!>qclnZ@HBj2vavH;==Er48v}RyBQb5#rf5zaW66 zt)EBmY^D8y>UGIE5rBqH-uuDg*h~mZyJZ+i-LbVeZe9v4GyX(U;@YJ4vwauN}6kz6tN(fn=AlM8m zTeTr6IcEoWO#?Ac|0Vgl^vz2+5S6&501Gi-!P$bT-7*Da4gx$Z0mu-*S)378HqHw) zp-<}&&f|SH**E_(E3B--mZYS*`4O=shS&PGzVfW_le!J=ueDd4^vF1hJfDztP>DUI zi#`$%43H50Jmj@g8O$C#ee0Oz8E-X^J^b$E(4KOr@_OAfKE6wg()Qle{9u*zVe*k^ zKDfI5Kzw?xrCe!VcL0R$j~Uhe+@UmYS}tF3&Krc=6((o8?@H)R>XY)@xFbt$J#C%w zSbdTHqmYwORJ&l4;ylipT7m0lPD1ZLK-z7!e1WSpu&8oO`t4*MBexFvU_OSm>3QGp z2A@t{(Vsw2cwfwQ*?o<8*sWneL1e(MuOCmIry=Hfw@|4~EyVJMPR(>_0@fs0%V@dw z9~%4ltSRS5M}oW*tgvJ^uAj;h!Zm5#$qCbgiVGeAqz&2%l%C7WI1_tAdi-K|zpZVf zY$h!`zT2{%>Nl87I_j;t{`8J7o!q-VJ8E(42k(S-Ag}47-m@T4{Z7x4ubr?>hoQP{af8sr=C3lGY{XW9_LbA$K7mRTuYgQ zYAR1ah_8>ES;#dAp9|p7pii( z<22qpJ>blON$Loo3op*1n~b|vU#ZXPufvy2&Z0xQp~+vNJypK9g5j{r8j1DlNFqd2ByNqzSr_I6&#VXxFJ)4*VJ0{o-fZ^<(-hS2SnbA9T_xVy95&GfXa z?e;MT`i^_*=c?=cO_H6^&(+CQ*LWkT$u&urY7tS11?R|j8CGcO3%#od4bs zp0GS8@R-h~>VT|iBSPBrN9aA>EW+9nhTjCzoCtd=_l7}#Oq}n}5BJ}|Ce$Wi8Ush@ z6NsN?n2g9{MEXE%S-eF(-e@{7Dr{nb3-=mRmta zkr{nbjbXBX+yvvT(NCpgrbDIUN1&wOe9AmKQ1YORrR<=TrA!~dt9ebhD5NaWJ*0G; z!*JE4u60qCQM92B5azc{_P;5Zt%WNcy9lMsXW25B2|-k~xEEBlrmX>67f22PYDELA zYXNNxO&$P}83^zI`c(oX9}ogS5Y&OJ++U~HX~2)A zQwW(9BLDm2$EtJ0?#{t&2MqBXV+g&NE<_~EotR%i?Fometo)-b|eeZwF}% z{w~H;4%MDi?oU)z>YcvP3mEaMQXi2anfGWQsiYi|`EKjz?Kx2AfAykjV~5V@cVGbL zja=p8Up-DvXI%--XM@*H!W+6yKR%`is*jb*#}XT+=n)&t$ptJ1OL9aox@TEB5#A$ny=^Je0Satb37a%69lYk zq~`6qzjN&yH;JKXyV+nn)Y)KO=GkEF$|97~#5cPK!bd&*4}roECG>X!Nx04xNx0Z^ zis*U}Mf7&3B9w85A`~n@&ID$IH3Vmag#fVw>?c4|0=h`sA}0}7yqJahr}C~-xYwtG zFt5Kl3Zm|)4AejS6w!zHXM(is)u`MJO$Nv%vvmM?KdRM?GP&UVm-ly#4}s z?j;JMJn>1mRKEZz0H|y1I;DGk;*9h9Bmu~42I|A|B;3kTMf823q3BqzPcMPOxkmrma zU??2`p&($}9cYFeDD!@|E5z$pk0N@SoD9u->X87F z#g^ut%-5o5_0Os9a78@7R^3nIL_;>KId;^_UYMHSwFSXn%}=**JX2O3b9;h2|C!#u zc)rV%em#3j!@H@2fW>W<6b?h 
zqwxezoA?C*QCB?u96scQ`UZ*l&sI6Q2T0$?2ph^QclmW+brg>+OqEBkBIzEqe&oz^ zD?8qHS8(^MGym-#K;x;V^r_nJ$(ZfjE$^r~s7TY{WBK3Q4c&ia6s%wFUXkA9Aiu2n z%~kIEIq{fiNxEuHrxco2RRr}r(1J<6)a&nJAvHMc~e>Y}DugR)Yg^g~(rp}ONlPmoMR zruRbp$ForNDdPF19txR*H98p+L*{{XUxN0pBU}SCQw&~R{>F+1P)$W~`%)iD4--YB zui9sCT8wybS8MJfh0B?TxjqIQ>z8-QD3o`FVLH+z49d_r;RzQ1#h_zq$jF2Wn;qKS z=|1IQ5TQ-jvtgU;abYW0aTLXIGk#@&rAH%Cly%h^z4PyKiI~2<)|Whyv9EM-jIemr z*aba{Gp~QBAw@pmKk6nRnGNh$LR@-gd2Q-39CZ%^Yr(jpr#{`;z+ko`NxW3AyawfV z4Nawc8`(Yz+I3$QrgdLEK%|-0TRsA!!?gaP{q-XHQFmahmpO7$4e@N?8Nbqf+-t@A z;#Wma?SRm~oDJMUKI)bZ_A)0;^)gRXZm$UjP)n1~D~giO$K`jOV!iU{6VDsUi=I9M z3F;L^Pv=-gPq`RHPw{}LV-`K}q0R=L01^RE(+2c7paTJ5D?qydYbCa$?hQaN=*=67 zo;m@quFh;=91sx?G{XH%wa9nWT>!A)Q#645etKA8UiW=ZyMA9N-R_2!RO3AxcmTv> z0Z3VZmIdJP_lmOA7UDwwKv^t>QGp*~((yo&Ntea$sbv{)uy^|e z0~t;u;LA7WQN=FD^3DdWe~i|c1~dzF9BoI}Sksc-^b~&hCtAD>TuwIJ|C!$F5rC7T z9bQ^?>TJs<)Lc92C+$O|^2L>q!7Zz()|0ty<$cO_^JtN$i7i{6TlnbU){8th%17r_ z+SU>|%xb$18@)A}x>c7zDam2N2W+R|1ASS^4#$6Od!u%G#wot_HKQ!G?PZ_ml9@N_ z_f&(-+*E^TPF0cOnI2Tz9m=on1oPyd)E2nR_xTUe@}yQDPnU-erc5L~Nv{1G7Iavw zQS$NSdQ%9PeZ7cn*FlzZaf*?Pxyy+Mj-Y{%@M)E8{)eS5kUe)A7h93gmow z@b{+hlsJ-zwl3La+q zobTHS$~)+K$~4^$wkvUE@VN{MT%4D1mAsgvRFk0h`I&50n&p3dkl`P8!+%TC&3}9O zb11;$gSK+7&PIr=YFPq<--S2ttrWpXfGf{M%Ty;M?I7@7*1FJf1MD)2_PCsXazE8<>hXIp;? 
z54Qfrlw$oWJ=ppa4$-Kz1|YWd_*p)0ORr1d9RvE*RE-&U7a zfP$)pk=!cD!8d&0YQ>e%X1mHPGLo^e8yLv#d6KY;(y2pjRQP!2<@hvtld!$`ldvmY zKJe9ki?XKeqN3$+NDdYO@Mequ)^;kPNtRs4aC;^PKe+FFW+87~jJN(v2XOKNs%~Z} zqTSUokP8pV@wq1hWQBkYGk|QI8Y7A^&^$B11^d5>IoUI80OF6}wD)|q7)dR-KY}|0 ze_`)(hFbsi1CStzGwO5gn;yOAT`?+nB6omkUXGOUjTdVtd_v2pq?U-&IR~`fuxW#MKsaXBy0}14}AZ?qLoK4rb;POK%v~{N?IZtsNOw!e{4Hva-xy4*mj#)8WpQALA2w$xA zzq@KY7UQkZ*J=GDwmf)9bZ9X#`wfAF^MIafHJbV)1#`6%I{lZ%R(u@%*>cB;;yQ&AgGCr${j*jWoS^6xkxvR+ zkyv<-l190^D{5KaT!#|zY!3e?f<(60aYe6UW@7NKUj~;aa=+3FyRjUo6;>pkjbZ4g zr_+7tO48X<^(V0G{-j8p>=+g%Tkc*)UDh`=(9l@(hJ>)J50$;F&qq%y45zB-m6L%I zvA>ZLF(2S_XDE~Lud_06HZ12+A%?T+Y=Nx$$FR%~ptZ{gN0UfsjfJX-YU$4!AFLS= z{JCcO!VK|;B-KV_iE)m-*C-JqD=J4gvCsSi5#l$}b zLLa=HB-+aAHg?_(X+Dh&%@8fb;<%>rgjcaSVN311xCo!?TNTDyE)K1`k}(W*HokLs z>|GJqDrm~s^me+gty&~ux-&3tpKNaa+BUP&zC>bma@}9)Vm86oyiR5pCoVR#BqnkoG=vttIMv2(ursz|ymNlg2vqSqtJC+ny5Ct^A}CTUpYU`m@B&_-D!Z z(BX84cXnlXrSGR!ZG|Ay+~!ZH@Y>3>$;uKUG{UjXX(BDUM;RMzPW82Z>FCdleJ>KL2qFBbuH-wKA z3{mOh%7hn{NsJpP>`I++3dvtSR54zv8!K&BH6{-jyD>!x^(qs(m8UYAg(oqZMW-@u zd|^=PY}QNuq5$Y)K&!TVDdoByX1ir zXT~V~RYk&@ZUFiRz(gJh&IE!Vfy#0K?N@+(_uqGl?R5Y#z?vz_cLeAv6d>yYFlhkv zdmu##2pb3R^(d9L;{e<&d&a1AKp=C9glC?47{C_?$b1F(q5^f2 zM-XY2w|H-BczLTep;B*o7Mudm)^T%|w zF0y@O8#Cea?O12pRGm-v|Fy^#=m&0_sM~I$YEeNejV%bTZ=lBB8+x{MKLD8m1UKptN`QFtGLTFCvE$CZ8xX4#n~q$4zFT#MN_yh*cg_-L6^T)g^4WjtM*qbjC~r%`efGP^%0EV~-#!jWXx zLAf${Vijq$IWQwx)kG({KKv=N+A)%RX=ygUBqiVKM|!b#TSE9!PeIyL+ql7Yc2M2G zdVt%}75qCBd?q2STEE4BFzvjV@r|(C;XBJS(C;gWA)9D$X$*sL#C4Yl}8amo^$44@g;C?ZVj65+}FFO z>j=z5G7H)S5aSvX;aNag$bt-X+L{(d>PiQL^h!ggfne?>X(;!EG_*!Msm5>?)P_@p zctx&=UIL#!)XFqZ~!K^DGhb|282BXwZ-vnYEvu;$RJ*bDeMh=Kfe-r^n~Z;F$)0LYN~&tamOzFx6<; z<{Cf0b;7TE?~co#IA=73Y~kM1Fyr-RpTZS`Ojwr2Bix5e8`r?+-BC|nhZ4Hzu@d^b zMMd=4K)HIaPieRlv%#C{=3oJt8>eq>0o@R+>KQ1RkQE9P>@7-!ev z;Q(LKc@AcMOp6UPBZ!!LLy3!MXSt3*1CzyP^T<<4w2!%;PUS{wOS~~sXW5MsZuk|w z<*a!Z>ACfi^17}<%lZLq12_xTE!qeV_jg?lI9)L99>Wh-G{v<0+AGEN>5!w+p*i~7Sr5W zl}2&>a-%CFPn&&Gb+RjSE&5j+<(6+-tu5v9^W)JhzD!5bd(8Qgx*Xc+SDJy>2lt#x 
z2)WLD(i=Bz*itKo&Swi}8I;Y{HqX5NsoE|Z4l<0zQ9B=q|0<U#Gvi+2aVA)y_SevMtGG;it)9Ii&*SIp zi5!k?pjkBEU;Wyq;kM?1aFIfhNzGOAhR0*rZYGvKNDIe=SDC7PltZty~tYy$SRZ$f)|4QHFzk=r5 zqTosQO?;s2qG{S1+d!)m|E&7MI0cjG~M6k3nhm6E_!5)U6`p>oL;0htj(LK%Kj%#Uj4z zFTS0=$bW{BkBgB{`V~H$^(@+Ml?2kxfsxOLkuQRgFT>=OgdtmqA=`i<+lwJPjUl`F z`!S>43nM=WBR>iwKM^B810z2lBfktIzmCbvrF%Yqix3;2f(?kn28>|?2yp-^IDj~u z$T6H%&lf$4qCv)GgfyfinWQ8%FG(_AlF*QmWRjg3IqbKze`NM5?0M0b^@@as0t)12 z669eL^(3lR(i5YsSbW_<8IwYLtmw)7TY{RPQGCq#byNPc zM2%5FKwUP6dTUe9LV2>@EbdIIRmVSl|B7SS!-2+veC@XdQR{?MDg+Ka|CM^OvXh6l57r zOeiQ2u}5Q1#b9lj`e#h{TjY-n>W@IPz;D5p#rP|+&%*ICsU>wMNB^w{qjyvB3phaD zSx@%;`zB|(&2BV09zm2r-Aes}fRSI|f<^gVD17u#YiM-z%h2dsb(^*CnRf=Ik2M_u zA4?mSQK!qP%RA%eQeHb6xRmemREXYtIBz020vpQSjTgTRxV_|M4Co}CTFX!iSdXr^ z7#Tr@wgDd&)pFs}J3t*xkprRMDBu|?*XU(ypKIjag~~%-+OKPqd#Ql6VFb#{?`eJG zW@3(O1m)MX$v9Kwp>8PatBwGQJb6oXoR}~KICQKoU!R{6?@vi^R1fOhuh+*1#9BEj z561JEoS;r8;sY$Cy1MjH{;S@36N6GMO3FPt59m!&CZ>E*^gh@7ug zsHs!pgWPnI^I8Aq%FTp%lxytX)7V^>js@@9S`}*8I4?o!!Lg%Z4mB_SG-gItXnP%X za&C{-a5(9be0uVi>>5L1*9~>}46qHjkBB~*9NYk7q7n{rFRJFG>@ZL{PuEZOPxB`X z^fRv|Tfh0Lr&VsEQsx5EZou^30d1Q#r~~we&pU5z8?7_IV{%l2F2a|~-Rg{@q$f?9 z;n=oas0{Faczi%Dp|q~fpZHN^rruYvAIH&JCpT{f*yQJ<_GTq+4Jre8O**bm;=3js z?h;Urd>`peO}+hcG?)%<20ex2)Z+GLUjIgAU7&x!u7s+m^Pc;zgN9?&Mc3JtM=pc^ z-JaA_D)+|pAknyk#O3&j@|*0nym*(VQl#Tk)3u?cN2%zD@jhL{NoF5i^Ny(qzV%xj z*H@{_zL{HNs~KCiS6m}qp8Q<@hQnT!cg5if&^GiCJJb~f`RUc+is(6wC~HqGr=-x- zXQW8#B#5L+O=PPRR2~wmr8k&ynynmWw7AeEg+Z00GR9mFh6N}iURBB}Nw^rb25I?f zq`)SshkPTnES|mc)l1P7sPLYN)AG>od7kt^+iYG?yTXxFyR8?bZMC9EkzGiMZ*xml z(Kh;fze8j~wzo$wWKm3a!HjXPcAI6++SF<8W}9lRZ%4vbw361={Yi1IK-O%I-;LJR zt?{MpreH*q3MZV!mQSmq)^(MTfrKcer(x z0iztkUybO6;os<$Z1+Z$m9(x_vh3_YA?CuabKJs*JHmD!;#lo|>c_>k)`iENCN3=G zPcOEddo5t52^JSW^sqC0{F+4XPER^2AJ;BpW@gSbatv$%!F#Vy~eO|MIWrHIcsbA(fs^GMT3kVx2|w-u6A>) zypC)op*G1Uvq3}N-@HW3&CE);-E_y9&}wIy+=?=F!i;jwr*?!8*uZtZUso}AemE$K z?e2F*W(i9g2H)*i{!&mi{lyz|SiOwr?(kL19o6T5MP}Q_aMd!va1>*f5F=rsCVFi( zHzu;sAWpV$4xc)dACSEo8N@r(+H5+!eAqu6YmhiJax}W~|HOXpSc7#h^tav`D5~G+ 
zal$4_;LWssoWVx1@RFCp*vsJUnQf!)+l_i%Um5(mzH{X|O;Eb7_r6Ts=;MBYMv98* zBHytotO8$t`-?vLG<)P!8C?O(C8e80Ui22RD+yO}%YUuzxUaati=5iSJM68$vben~ zIhnklL_rtPo-E$H?;<595~4bpg9_GWPUKPd7JZyr;W1i@Qe+iI_Pp4wdE~Y~wa9EO zd0yEn$@hy&O!xhGB<_d2ROxSZ66^Oi;Ec}qDv2gsvY_nM9*9|IoQR5J`5qIuQ$x<0 zOG%c6oz-Wl!4NfeB1C3@efZKK&YjG_B!j)#?H460jvM82jyP{KJUIVMp#!XD&eJ0ctd^4er==H`*7j7m|saf(qEi} zK6}+~Txc29QA18~uTAr|n8262I5(E^zTsda5J|D|+fT8-aid9IW9MkYBdepI*-JZG zj@80Rcfio;W^ihw^tQjRt~;zRsxT&M@3^R+BV;>TL%_nRwM9Am&$LlCEFkH9+Xpu% zRfLSwaVp@;h>u?O8jrDI?HR1)-+cGR276flYkWbcJ$$v(dq~d4nT<;StLu|!u8J!P zowza9>P}%r!_B=n)k5^7^EqH%FvKK^)~8~lSgv{_Ro$n*%80e!e%Uxu3@?_IIADvLp)!wyAv1%G!9UqV{AG)wxM+T-=cu!e__~CqxQOOU zfr%puwYEmeiJ$G16AT$FxgKYvysl!ejS;g(?S?IZo?auTc8f>!&dAXeF|}3STYU1j zY~(!Zjk>2VUn`~XZ0>3{XA6+M*87v*Do@j(MeF(^9+TQR^kGFnFom;-L9R z%>8(&!|J5-5r>FD^r1Jga4w{zTaPwek~vukBGKPYkkLE>W*k=>j*oInza;OPlP0eo&X!Y$e3)^&GWs#J8g$Aw1jv=E`+P%($b3fHQawwNIh5 z!I$K7a=IR0jUq06Dr0KL`$VxSqTLAQ)^)pjMTrkME3@j3`@IPcyefJBy{Q}2$EYCF z23B_JhSUl7En1Wct2=L|(zAqXYfwq5dth1> zKT8|1t*Gl@DiLytQ)y6F{aY0G0909~+h!{C)jqh1VGg5`K{uz4Vo>o(V(x|R{8yuB zH_^FM-Rat^-tT=hAx-pi+PZJd9!U@U9h}VY|Kt9iJv5c35Haiq2hVfN{uA3hEQUc8!8xfG^&w zE_%_4r~qVD6BeVg>*5*a^@u7G(c{X8alKCo=Z4;6pW(QWbE{21e`a239_>MvG1ZIP zB_oZ*1AH+Spz9B}R;Lakn8=_rGFAJAlN`dq^sA#^rzFts8O(*K^E;L~0bHW#-|Sl^ zTj}FA`RfI-&$rxXfB)sfeRjjsa5UY|`+LkgMYs1M;(%=Eu1U zY0N+w^zcHfht#<{?`PRPp)?6ZF_?3})tNolEa93Ei{s6S7mIlHhg2Tsu!@^=x$$7~ z>3`QD?O0`?n|rzA;I{!kUewFtUPMB=nb5OKbY=4Y?u`$WpoIm=?IM!(mj8Pi?2Em3 z{y;Sa#5_?W&?B5e`BR&xaC-VngF`Can&7<^1{xlzoHf!s3DPMAc+dd5Ezo-vv?a3ny`gNs}Kdc>0q%0gR)4~ckV>b68y{ATg0QLIcb%>FdthJe08NPwgGK=lxuZi(M{ZDwP zso^&XMG*Ah!b~P~r~~0(Zo@H>rYV_GylQE~S`-Lmh%hd!MM=+v9va}@OPZ^~XOb>j zF0AGgUqJVKXep>Q>A}DrcfWY4TR$P5k&Z%;4V{4ZIO@zWisR>+6;a zvd3ia+5i#izxEoxHbHhS>_IYgJZWosw_VSP@t(!p1g>S~Bq@y@ZnzmWk0nD9Zs?IY zK269^-xYU}84ZbJ{*}BGgIIcXGEFE#?X380MgWq=?2%EYCIb#fEEAmY65Uceqqw}Y ziRVh(0<76DV|o+*b?!sTm=jcgZ2FJNb`9b>9PU8sn9N&&zqCiF 
z)5n;5l-K$}z|Fno5$ZM^cGRJUk^wIx(ROhog@~)K;vl4-(FH+X^NT+!^+YKKxLW&E^o$e@M^}l&V89DdNe`9)6v2SPm zsGoAcYJ=COi_GK|7f)xr5nJaXlC^WBL&tZmIqK$HnEb;{eueq`ZdkGkEt^F^s(~{g zlWi+~gKeu_%w(AUFZ&j?Me3RHY08<-66Y&%m=b55SB0jibBLy8k+Jhbi!3MY7h_If zKqvA6@$qHIn~zg72&k_^gYc^xcbMsPx< zOwXLP%xi$BtX~mWrnR}KesBvb5}r^mg8vXxZ{y%DDs7ASxr@O#^zM5ixl2*YOx=`Y z+4OH6^}`_^b-Fe-w~t_^{*z_y*W}mGmtwprt@b>x-6jN#`I{fL_7ndYe0uu#d4Kl3 z!hV$zSHVP@xQ=@+>iu822lw1(MKrYk4KfN*{if+|4Y-A=X zrg~oF6!pJm_@vZ&k(vs=)w+{h^R)5O4$C~=o_Gzk|efj4sz8u?mOw!a+>rs*0 z^s&+b<3)M z03Xu5uR)7b`PRU_TCWgSHlJG8#zow{g^jtX(==dF(xne5Ltyn|2?J5mI3@*jJU)*( zry)6s$CEpWhv+nwCufDth{MF7b?N$t>ruqJJ?{+VA?Si^Yu#qOYsKeA*JVe8)&^l? z?^f@aS8#Q z@y_b>!@uwJ$Ny~PWV_f9i_9PRIOk$lZM8S3!f*YnIVelNrQ(+t814?R!DNtse#>g>m=MSoG>Qk(K{GLlko~cHZyQo9t+dm#F&TN08$bddC|=I`NF^ z`p4eFM#pm=!P)JsM#uK)%q0sGDH;Zvkb7=Dj_ZIUZeoHJ9^yG zG`t}t&g$eeB0y3aeE<`$2Lp4>ZZ|TvLu^Z{e`%<~Y;Aa3WqPa0>jCLzKQVkD0No;F z#PZVE)H>U~T|Y~;k{VhLj({(hM4!de0H4AR^Ml#=t&|zXKc)3t) zS~Ag@fogCAF2%d&&Lfs)@-OTT9e>8@sW#z~SuUp^R0$Vn#n;%Uf3+fGPj1E~Yez|^ zL(I+M|5K(qbqK>XvzNtP1_kxRnVQHF|MD=2y($5HOH;AqUmJiXG~(*EE{EcB+drp{ zCOz}xyAL2`dRrUiXIB`qD!guBm4%UCrV=Rq?9Tx2DLs~V9!rd4)?`RQ2%$+quG%_o zKZk*x@~%;(LznK1ntKz?+<>rUYAko|`Ed&c0=TUEwZ4~X*X!@y8A;vU5+ z$>?$|hf=j1i`TMx{Vl&(0Fy;s4AMlr@=3%Q$URoG3r_PAURS29B&_xJi*n_BG$p(F zOrAQ=jwyC3B6%l8&h)Uh+IS8eW0dsjA!LGPNwWciL164e1PKd_!)E z^??l-4FXdnBHppUY(6+|q5;w&LN;g?hz$q@f!PxgpIKmKXhsxr`{fWJJ2Xp~4VVc6 z3kQ9DpNMc}ft`MEK953TlMku<2c`YO1}p@DL5YZD78swrvr9A*`_+&NCzMu;4OjsJ z>n0)^Szu1`&gW==Vo2pJlva-o*Z=}sCn9E8V0CCl3=*4iNQDPVYs3a@1A#pg5f^9; zGy@G#4XM0?(tcwD_JY8Xi3oC57@vZ(ODqzbdPs#IN^8Le907sT6A^-}Fee4)b2LCR zq#_8VwP6EJgTSSU2u)U49hwn`#HJlm5r)#*vjLYu;1+Zrtgurw0}ap(sl12MId733c1H_df3C!M9l^R3{A~Z4c zF`PR;NL>f#x=Owk4^%so0|gq&&b!I!20lnKO7M&n?mFq$;mu2g;=r|39M7pj0ibhy zQK|X4qI0*E#(VfBu_GBHCO%xn?DV5OfC^$D;dG9TNeDkGspFOY8;F>ty?>5L4DTqi z?l?O5bl`w;M=0aSV4i&z%(03XYwS!gAAACIW1gYEyYNlkJ#h)Wyz%8x>KitZ&{uGQ zpcWX0EIv|1Nd$?-i%`aRhJ7}rhhAl{&%T?W*C6aOZ)#sCeJ~+1jvl@DIT6zS1$aE@ z$BWyqxZuo?-!EPmcPx3#3M6`gJ{>jWS 
zs&#pkV%<}QO2PjzzfWMslog&^7Y%<0SVX=9^Mrl|EQ$v)L{Ny;CP+(%+S&geYa=0N zW^I_>oT?Vd^&p{PUhe7Iw5ZuLO=>(5xiO1U+3fImn1KAx1)V!-9cK8NeFa}=66=0qc^9#J2%uzyyDU5FW z@tKJ4f8gj)M?8^Xtb2h_O*o(8?kUYq=p(~(E@Y@t%N|xR7xE|3dk^N{vZ_Nt=h(q- zk&P#No~i*#gC3UeuY>^2kcPY;s)2#{A`UP22>`7SeBVE2a(}}NU2*S8Fiqe^FKTO~ zF~SXxm3Pjg8C(qot~S`&0DX`OGbaIdOfz_s*XLg{(&2`AGE&#!37#jKfDuU6&Zeaq zU<|SxT(>6c89s_PS^vDT5`&vddgTA5_bB^cV|gcI?kKessp+S@%K3o1Y5Cu0@jh)x zBdr4DN}^x*7StojO~|agUnn=lZh0a`josXb2W~#1=Ks$p>^vTq_VP>^O0u(l7qYgDP;KN ztjfNCymqaE+>Z1Ix&B!POS`*vbZGgfKYEMFrU}rLmqEXhp=6m{MQ?GE`B(j;Hz8Qj z)3Ab1urs+=JDu8Lk0ga4i+C<)Q$qR0IztW<_!lR>*w37mUe%h7CqG++CNw#@%c@(t z^vgWMQ?)My6uFFvO*kck=#z&xC;sb9&5Z6!)5E3O_^Bx(rdn3)Q>b~etc5G?Yh0#R zoU6|DtwG%%e*I`oX;D5cV4*+dF?_Ldo1775#n~X*%Goeqqp`hRSvYFzru?Tf zL36w7@uMEqs*;}Iys_Scd3Jqcb5gzC)XS~=<<4QeHKR`hk)JKL`eTOoqv^?AP>REu zn|{e|~Z4o$tApm+f@iD@!X5eFHQXSN@Y-6gbJQ zzqT>32*^1Y4!Tes{)Y10f^V^HX(r$dFPi>JInS!*qC9WZ;%Sm7F&vNlT(@ z;P%V%Ps4!iDg;2k7?G{63UdP*!N?#wFtOpEh_(lHug@H+w>E!;w$_Ifh9~UO&fYKT zc-1>Zoi%!X+|t`g96nd!O*T2}XSilB z&DGsQ&UNu|e)mu@Wmd$Hp=f09Du!qNHKuL7IgIV-f0j*n#>Fb>Pfm6{5!>;`c`&iMMEgiSj z6WV=FAfoI%XQ%AE;d^VK=xbLk_Jm2Tjz@i+72c1O?>CyO2a1|o^tiq?z@K*8byA&oGHK*#UNAG-8Ruwi8H2f=dKK@tD(t)B zQX9BNF&-R1X-sC_@*HNU^B&8Uh;l0rM4F)0^BpU#l3JMuckxepY0hpf)p-#O)d5SQ zD}fs8tuj+;t&PTVuEim$t=+GG?T7aPrwLc?G=btM>y^l>SFWr&j5BfLb02}ldpIi# z@(r$=#@c^WN17pHM<8r7)EtZg9$pkHJKcwkYkRd0*1b2f+sRHU9z_pW)_ALDhe$#ZyM<#PEY=LcOk^Of6y#l74-W(tR;z9XLXJx7#1lX-{pgSPoM`j zyI1oS9w6zYoO>eIiI{s2XAj95{H!tsy*_*QK0OvI+t2-)Xxy{+x!PT2ZdTgpN;0N< zUmw88PPzQceg*2ae{tt#JCQBnA-Re6GI{keFd%rfm41z*OtEH+O{Hbm6zcF1fI(_+KUPU-<9nz%O@R!EQ48_s%Tjjob<0KfL zfP@oqn}ALE!fqqx#Gc3G&gaWO5-+8kUo=0>qM>+-HQC5$6Kvson@Xh9qf)H~Uz5$n0q@Ni8TW~hrnlNsv|K9;w)}H^Xk*3W)7`N2I814#%D&P&v<;;;jA9tG zL%)Qwlw?Xc`vheHwqoD!2>`{b)iaIe>QmRc?tjK=M4n_0UU!ek@ zeQ+j=L@JOBjWI!KW!ZpXATU89f{q0y^T9a@4Ui6vu|R1R*nn{$FkK=-oCOwyW<((s zUV1TT*o(bO27x&f5xOie*azp3C?q@CP#oH?sIUREL158DgeME^_Ji|_Xe2xNP~3me zEVNnq2?BpgM5MF8MC6@A(7>yqI8JC5+N@MczpDa)4HFUVXc2j5vKXWS#n2equjsP@ 
zn?PW@LnNFaq5OS_92UKq@c{jfq2P(Kh801U^JJg4RGY&;aAmm?V@IZBrgW;0JUctoJ8< zZura?&YuY$QPXo%6X!WE9(_X$CMU$qIaXZ(J+Y;@Y0p5-iOo(-04XF%qmEcQJurJe^d5s*Q(ARRWzg30o2Y+-=+tR;2Lsxg z5W>BxYX9d{vQZTO*Qv<6*sO5-%kE4}ExAf^XN3a8g5N!d^-#`>guaBQP=5I%^FB!M zdN8Y28Wdz0F+$RdhYas%k-|pXlx(O)vjG@k@`4%Lc@PhM{tesNgB8L1EFatX zL<5nBv5tKvEm~F6`_wXl6HJ6GW4LLT#AJY(P%L zf~iu5!XRkpS*S1^8uza~=pVM;9fu?l$p%x-)M+t0$u?8YqoWjSs3_?4m1A-^H z=77UYJk?LV^CnaRG~aRI)Ap*r6RVqe%8&JUBm@Ic<`CkC7tHkY+6S?N8$>eY!El8O z2!}j#t35$Gl}c|~hq)Irr2LMUXqQ68Y^&AxMBGhUKkg*H#w;xxPUR#a4GFDOJHUGW z+B_uAvs0!v)NTqeAqGf?a8Ens$V`UXZ6@q+V5-95BNrzMus^7hM`=S35@}{LD*7%W z2}dJos<2q@@cVS8WZ41x0Y6m|RpvH@jZS4i86=9a&O}x>>`*d~L5KCIBi!`E;3 z#OeOzGDIX+ICaQjgwsuvU(#WIgFEp$5lJ(JYg+y7R0Rw`k}9S}KQSiw(aQ281$o$zoyRPpZa(y zdepz;k2#%7To|aN&6AVMwt3Xm<<+qFW0%7%`R8giRQGa&8Y5-8mVc4(fiIEp{ZHep z2J1Xwk1IO+)kgkIVLPs@wVjS;CU;?j1ypTw=^smbr!fz^sPW&at#sxioZ6-%44reS z%Q=5i^SH*f#cS(h<)ZAih)WjBi{ci(b887COg0E5@i#RMINBSX>D&8-Ml>@waxa%s zHma=pHI9Yl+89JP&NYjRRM`7{9c(u89d0(U>36r2>~*icEj`NY3_8l)f?xeqHuJgn z`+BcmQvawzVT1FWWwE>5XCZ%Xr%5jD+*>xE$}EN&WX_ ztC|8yOBE$hB|yol7kttjD*JuOSVID_jo|#Jdby*73G@p zw={g3#jS{hg%q*ZO%;l2wYK+b)>74|k8Pdhm<>M}r@?IV^I-N1(_fj|Ghm=jli2EQ zQ(F+%qJ*N3dzY@u(Nml6VRr<>VabS8L&SV$ouYwSecpV>EW?ChZ>}$8iQW#Oxtf`5 zlaqi}HMk0HG zC0b&BocX!8ihca40%}^RN>Z6y-hcbf08wuz))U^=G?9+{YK~2n(wM+g_N{p(Nck#obn39jmHz{&|(|ft9|~ZN5Is-yix@Pc8c8GyF|6RkPps<_l-}@7c@a zfvI?^)D@xiJ_@*71LeWz512u(+3`0#2(dWYVN8Rbc%NQN$;>k;bLVv>|5g8EC*5+cZUNrFnfZQa*T1t2tr3to2xGQ~s1|y7S8I<3Ckn-WyCX z87DA@^CmTi^~S3N;LkV}iZSRa^jAOj^Og}~(|$Zt{!+@^_siFRzW;45{~ovAG@IFw zJ)@sv(R4M*DVx(OGV?<U1K+LGggZvK}6-zFz@C<&i?n0o1%B4}MFZK+^YW8316PN0t6-#F+#`Z?- z)suI!q)lVIuR@z?O-d7Y&hU;Jdt_(s6lL!lcVu1faJxzTR3oWxl9uwf+4Ie2_?nW= z37CZ)Q>~bT5ZO*)$DI$G|7msNLu3vG?P*V$yz;}wBvgH z!WdM_@rQDh@FY5haXVyJ0Uh|uxPIa>6@Hb#u;k#HndH$&+5JtW?^5S4^HEXu`+?Lx zvq2Txr6KB+Wj34Bn&2~3)AeCj$Fx>wc3_rB(t9M#;LZ! 
z#dOiRSLd(Ba*bTobS2EFBYf;=AM>2}H{AD(pDv(8Yy0Vfv0Van;k0gYYmiYYt{5~a zoY?O6Jm2qme%SN;xaavP&+~Jh=NCQCuX>)}^gO@gd4B)%{ITcxv(NKaSiDBCWS8a7 z=I}$W@IxsGLWK!JwFyGq2|`l|LYoOf=LkZt2tp|cLxl-LwFyJr2}4r}L!0A}RkVO3 zS^zg4z@83JMF%*d18~y=?CAkj^nfFJ05=1`o&iwB061a*a5D}qOOohIL4O4>#bPkU zK4Xe~&J_ECDHf9{7K0Ib*mGi*R1HeeGQz={Jf!vPfH05)*|tN?%+U`UaaWPp@J z;U&qyOA-Y#k^wRj1#*%BauS7CBm=KV6evgrC`c42Nd_oM6sSlBs7Ms3p$P&^v4Tvo zLQJv3OtCdd{Q`tnQZv>?dGIs7pvz+{*06$lrI)pNCnuWGSifV z2E9uSdY2jWE;s01!I#@JzPR=XHU5nj>>fCF&jNPOE_Tlib`L=DjE^YvOFVLu4!}we zFrx<)(gQZ>0jvxFGX_8*17MSZ*M5VQ5n#p$C}ad|GFB0%VMs%sNkg84cyI!la3O{i z7&0^%G7K0pEEqEE7&4p~GCUYE{1`IA7&4+5GLk|0F@pOrf=4leXE1^h7{N%4;6sey3yk0gj9|=X!358O$)5$&Jqu=&SuNSbpTY#3 zVFF%a0r;^1>R140EPArEx6%-9X$X%rgjX8!P8z}|4dLH~w`4yH{wd?E_X5)<1bK!H zc!>kx#{sD00Gx3EP#i!#4q)nyPjkTK%F{28yTb&ONo#BO+sz=6CypnD^5gPN9p_f0vU!-df*xiy{)9P|DIM zhc3V2fM67(kIr)vEm#ufrwD#p?WJs&uie@g385TFP z2s8-Ndz{+->_oAcR1`W893u~dFYLrV>$!M>D$AgV%x?Rm;F&!#E5LS>#EgWs@jFya|n6pzDS>$LGJ9Be|O*0(;A}#tR$_kM8nE``~0>2U-MW@v?xnX$c z_Nv_*{)LZMs5zOJDA-$la3_(z1gPDziUSUtNvSYBlL2Smeeww{yQGv78wHAnoWPBZ zZLl0ed5YDqVsD}M!)*;_|D1d$mrE)O^%vFF7M`W!mV?CmlRWY4-@(3I;`ld*WfwUT ziuZrLe;;l{2aA587xcQ{&KHTxWmT82pBWV-q`$}5;@5d@vBtg2mz34E-A zwdCm9E;B*{;%jupPBhBKA&mX4Q_nd5j<1+@__r0^xtWmChPKxT-)#Pz`mFm_=)p6V z%GI}G$@)a{N+zwwoY8f=_b_BtD&B7rT!&8PmhyZ**HE3wEc?zv{U(1^G!VC5y}bY7 z5?@E0+5TGXT8e7{^;YP~$e6L=InmcpC6PY+UmZ17qanCc_nZ7;Fi_JYt%{r;+_h*%%7qwZ?lq{lWdgTgD8| z5;#Y)hc(7Lg})^ClTt7e4U#4rPqMLe{7^w}CU2P)%BA8+c%}@}V!q>M^^8O-rgx*v}zh~q#lzS08_dm=y=WyTr*Y~8AtXG77 zf9v9zZ+<9nRsF1YN$;%kI5GX1z0EdKAGr-=Aw>1J)P*Klj34jRy~ei zJ?Kt)-H!E)NN^t1`ddXWXRX({r(zSTz;r!Yn~S+q^b%`$iYNWm_U(Q)XysJx+cotp zeBY{5pM1PvS?=kc=y@y1Qq=!f_Dylh5GuT=$lUyW#lV^V>DuJKcY`+$wc{v5II7%6 zr~U2B-0FxgZRE%*3S{)OuH?mA>BsHRf85@l;*vXNa5?R7(?pK!D!%sfQfx0l>TUME z(HXqS(it{U9TX1|)m%0)-Q<>} zLI`y8vbUEz-L>Xe3);MIiMiBwfY&g3GPee;Z-!!{D%_aqp_XGE?M2|QMCn;c)GcZQ54sh_jI*XlGg!AxIm+X5e)B(|OGF|7&lDj(NX7Sl;|<|15Fee!h< zYn+wq8PjKpw-(YzzN%is{S30P1-u!QcPqY1n&3>|5~Po9edRYv-ONkJDn7IE`Wy~m 
z=hx}9x>5FWymRyd%~*@@Rab~N$~1~s%CUMa)QoMBUbY6_o+m3ly~MOUvAykV2nd9o zV`yA+q{C`{_t9gVyHhDIV|m#nPt&_>bPRK zi1%1xzW8tyEFwm9e%Bp%W8l#Cp>4K7($=U=2#I<{y#1Qut`PV(LQeSGJA(|pi$VT|-A#1(p(D-Q@(-7V<)4}K zY|UgnK_N|cEei@2!iM=ebkNq2c>JK>j?tGY?%+h zJz7x4{9c9tEmo`ixfd-aOM*n#4j;vo7%c$8(e)eJ;W^8s(dqJP&etk$J4n=HyRHFLb@?nHE^8HFU<7SgVdT6^#T~RgGv*Yg@st z_%?nc05`b7rsE<538)gO^Li3V^lC~n*qQxEFj_&owk*YT7~pl6 z+s_RJZhp)npP!Bj6BZKp3hAYuo!AolG}6PN-*&rr;K`98+KWH!W+&#=meNXQdcbw` zXHMpj%kN5PcqnaZzVO4%-<-tMsmsGyrpC{gg1_;Pk}>)wRdIh4pp%*ZT_My$|6Pu= zuA#$P(<7SIneMDnBp+UWjr0`>WGTW6;_w;vy)_KICxop!^Qzz;pb=2= zo#xwU^>>=T|2upg`(IWyT5c04)`mWGoN21ClBbS%V0peRV`Pwhee~{`;S|v{#wtvD zfem-^bG3%LvCb0;@pD@57p-kVTlQxwYvLlX zZ`?y*q~)g$A}Qp2jgR#AzdoE@-2u%*K_=Bu+Rp#OvsZ@2(KG?#?jZq!1q&`gLeRzC zA-D$#P9V4}5S-xd?(VLE;1b;3-ECp#_Wkaj`%zumRW&o))jd1i%`!NiC-|-Z)+>z@ zSdC+{5xxnMV*U6`yk*AGBi{RX5C4Z0S*jVEcs1DA)~9DmYN(QOxvBv=AZ>edK}q1Q ze5fG?IinxIu`yCQY;jLtw3?Vf&vbql);rvaPL*${CRR@|wBVD6QE_z#H(T(h7pN5$ z9dt7Cv9r?FI7BuU^{2e8bWJhGoH)mAuv+W+=bPVmB2EK z|8cB&W+S{!ERg6n2;BjDc~!KNst>d6Phc{$k7`KDrG0$rWmR12B^wNcskye+Mxw|M z-@P$RJC^7F8<@P~Rbr#y9)h9ZZZ-1ph#V$=mU$iwOdcmZrjnhK{MaSj(C*J~)vfcB z$v^BuHa6^H6kWl)IZVMjTgiC4UB+xXJ-w&@fi5WQ0`ZCeYA(#-ZAIcGlgT&z6^d#L z=P%#9Iw>?Gkhw%y)+j;eq3~b_`Z4J9puqBc5&2B?n?^1BIxKD+iiG`9t1b&pZ0W zAI>iWvO{udF(+kEkV$W>C$LdA02}8Z3Zg1iK-F{{^*mpp`xtC7_W^j=U1?dTsfsY@ z91O9K145D&pimI4{Il;!r9PtCi=!xfC|9_Mtu!FWxcD1tH4RpUAGuZEc(*F^Nv1VI zxFXkmiW_6#SCc9O1@}bNA|iKngDGx8Nf<5rr+TT#S@9ds?}iU5_SCq@j*YZaHzk^D zZ$myjs)p0AE(hDa$m!*K#i>+?^#7&%CohpP0({&2NW)_#CKrtP2$uQ&14>CkRyGKf zV*uHDLqIwU5)_bv#-?nkS@En}+CNNsIXvqF^^r}xogmK$5KcEF=c3(%@)UGvbg)}r zHgVz5s*~qByFPx`v7Yn~@G%5!BrOz=3pA4N5pIlb^&g1Rq%3rc?opPI@Tyfr>WX)e zbBDMKe8;_Vo%*nXA6FtvE1Djf{7dSnL;qlppS^myV|5LuT2TMbpbc5^+W5}EQw!^x z=7XJ2{FFe*CD31Rd)t6Tj}6!H5r__VIs`~cZ@^mDfbMc)kD?y0&ZoMatG+B{F)u4S zSBuqHyJ|-14Uf;gy_7?IMH&i1Es8i(?3HF^=-`bH%>uXS-Ud8dbL%oSJ5SALsz2?G zJ~ux&IuHJ&9D`n!O-P$`ilUPw@%X%Y3%zKpMHlx7N#MWx=RE zQz1Opy1mgoGW#Q;mv2#-!NQcnVk@iGVMdV6D5>|-$gB!ySD6?=2~xW9naUtY7xsUr 
zdW7(u;0ZTJQ9}vm{Z}>+Lc{Mz-$X`wcDiq9+{sT}nwmK8$5O?*hsPc=H8gQ_jXqwW z5>7pJPFw)*x*vW%INp?Ll3V%mAW{Z-2=I85N9p?VymnYWe;qRkl5JrPnxkPr;GVA+ z*3(utoXpMYaQ!#AL*pJ{*uw)?kJVpbwdMK#pJERHDJ-xoHdl~yMh0*r(3}qe!rVc? zU<9y()E-#dfQ%Xa2bI=oC^kU6M$sD^@5#EIq-O{}BRa&$-Lfx6#_M$2Exc|wcFZm= zI*pywt(B&gRWfRVcJDOhdF&OJo*&2Ql(07pg`}zmhg@(B?wqwUI_-uyW(I4k(Stjk?a;=(hvPq3b}s^%ZEPh(e&zCBL{o5P9@DZPuLf zXH3$CkDTY02*_n1A1uqvdZuywN&_h?yoZwflVbv3WbaRx>sVNM1r*=v0u#qk6Grj^ z;yVQfjZre~Rp&{rM(eblzDgPo8DQx(FSFt{hXtv-$i#02ukw`YxPsMX4r$YKze@9G zo29TcaPKbT*3Stlx||0I?k+>e3GSe>4wYZ}%Tax*c0rz!jrY*f+T!v}NVQbi`olqX z?=K;F$~El~5ov3Dvr7(ltp`_gb;Jiee@^|?mQsJWtH!2gy_Nl@uGgIwrO*XWV!{(^ zJR@yh=0!y+_RMGq?Dg$*dR{TsO_|^60U;rjI)MvOMvP!8&}kc9m4Ti@CCGHokA;vK z^=GXOfXLu1eg=l~&zON0xAvcb#|WB%tAv(UWlK+re-SG{zw9dX08L(m(;ldo-M$Cq zjhunkRa;*tl@eC~TMZaN9JT@oN#?=YmDs{I9GClb-|(4O_V3$&Hq z@xW_=>B`f4?yzt2HqVgRF=7G13xgYHLVh#Ac%h&_NfT$4ZE<|XQ%?+CRDo)BW%o}~ ze*C=pavKhAxV)3So#U0iV@_EAf+(q-O%ORszrX9XU(3wwJHo~U+V9q>JpDPXO+5Xc zb^#M7E{0%rthgB*cHT4^t?@|dk+0s&&DY#sH#|hjkOqU5W)J2cH5n)qu7?%L%=*88 z2Sw8K#Dpk?MPvv_HR=fztGdMxxKRPk<7Mu8dJ9JL))D4QeC#{d>b*%s^IhgTwJ*q{ zBbL8dl3J@twG?0f)bX3Q{uoD;X0bs%ud6t@YX9LQ|F0$YPT+6k89y+2)-d2A^b~HzSjy65@{!J#@(;G>H)U;2aWkDQeH(PNjaSvK? 
z*#UFVgUR1)!H2D^MjU6#sCkD(x7(#yk3l8gVFBT@iH%>4_|W3(uA%w~-!HFT zB|vIJ_wS2H8ymhAqfcEM1*vm~WS$dT=xq{;ecovxIGZ!b=c0;An;ZdixNr0{y)I3N|0!gRl5c>_>jM&H|l zcmo2Cg!|?6E_=Dwtahm^^9>WC99p|L@Rp6K!T|e9bX$fV&)-&%tyi;PFqGz|4m3Fou?$O!3xoO zIQ=u~n_ETFOw1GgmT;bgJYlu-c_p=mkn`}={NJ^XdKG?ulO~f4wbfCUmlgA;LR7y9 z0nEI7vU0-{KhS|_G&k53ivOw2@B2u%hZKEIm``rb0ZhI*P$$$#MUa_0jzs#q`a z^_W!N?xt-0;PZA~?TP=u`7V7~pg$taGB835XMZj-n($VzR*zYEtc|2^$)qXuTM&en z?T}l~^RMTC*qP1!`Nisq`wvj#T|G|B7BXzml0vL zibm3if5BFnBzM+7vGp;pFmkW#O|&U zt?CIwdJKo|ytc?p_BQrzt?NS`#p?`#uXolUtCr)%yKgOH;!GT>{fP&cUXll5TA1rqm3`jI%T!dV|8qP9?>$B2`PzzNX8 zdwvIyH$w^<87PcH?@i#szf+=Y{zt^Y2;(pqk#2NXt1#VZ%ZVIH zdb~!pE4JEv2eQ8gRtIhQ?n6yRd9~OmmeN=#$kXFgwhfin^W)?#{rWH7igmweGR{?d zzSiYIH1=*vhhncw{@#?A-KZXO;JHz3Z=LI@&WQAUoHLT^aL!LEHP4~A&)_>ze#hM7 zskgYbzL5K;v)47!mDOHGRUXoZgp|I1uj#DS(dbi3k#e?4rlZ%qt@?pptuL}i{pY6t{lUom zY&73E2Wt5om4r~>X|I*I3H*tRbGB>=1ww*Q1$M#6(uWXG)hoQS8c=XC<1X+}$S zt3Lgu@cRtfW+0?@WV@{19@jfRpFqj8S8-q}gi|%x>yo6Gs9hy(P8neQXt?R4w&K{7 zRH&}4+%v0=`!%^2n4adEcB(h%1Nydq$eA$;C5@U>rzI?9y{ zR1bm+e~9C{;b?s6XC(XfmN>6uZ_B8h8b`FGJu*d~srmRF**C&qwe1cl&JRPQlgUMZ zqWSNiUJk6#W=eBii;6xfT~#Kn%6x-iTWcHEYp)Dzu&NJ32#Q9i)Ay=NV-yX#@W%jt z)qWz4GJSj_mHM)`M|V_VOrvBijs0X_RGj1rniIArORo2}`p^*h2j}?CU7YllaoN-C zHuuIu*5;F6s}e28b$MQeY#>_w@XrdQq<(Q(fpH2h=tq~Y2skK<X~PyggKLCl0S9 zkd2*rtRrW>*t(x31=4qm=i|M=j*}u|{Gi3Yg<^>}loFrhz5Z`NIyms~6*aA5@YalE!T5=yn0G9SI-wIt5O_vWa+G@CV$uGFa7!6kik++AxtLV z95W9+@dI4mVj?wMonzRIvzqJ;JD%04JV968;v}NNO>;KtBF&^P#pXx_L=`qmjzOxO zG5qW4at{JVQZY6Q^|#hlWs%WsXSmWp#F!a=X(VNSY6e*!ikz;w{Px~vB>~Mfwe_6B z3OQ2MG=M1j4@f@&c!N*2#Q`Adi>v|8xqzh}(5YGy`s`pwk)v+i{j0CwSh!g}80C)c zkMvgDLp8jIFzV~>`vYkDLHlvtY4eV^;dqGhXk$`G)gT32>bpGS8A-Y22g5q~^3I6J4;@xz9q=lLzmQc#!+OLB zdXVW5u;h?P641CA@NI|Y;CraFP(?!Y$>AuRT;RN60yt`4_zx0QIB@u_{BTlj<@xRW zZRP4g^vdqqg~ey8FR>#IrpPt*9V{C~gwX>{S$dPPkm7S-prFXS*qnB+jTWfxhjx3u zhA_hxZk+Q$%WY5Lvdlhi_5$A)eHOs|YstwG)!_S+?cpGPFt9txm z)iR@R2ybQXt})c_U}zG8bS|Dxl?L9gD2J2JcV$fdxcXc>lTrMJ|A2+ySG@BZF{#di 
zKntOmYRI>@3G#We3bW^9U96o_yqQRjb?u8KKoE_u-x`f-Lx;q+(+@a4wqkM^g)=e(=mtg z0$yjiv;mbCQkDTBDt0ZhfBTfa#K^ zrpOy5*Z+*4YW<^?iF<{(nF=1#3ZH&qkK})wHrAHg7~@`KrA>+QB@T1Q8T@eL;HVnj zAajTbsKU{?!Wzc^8rFMRUqPZ;hT-ZUv+UjG-?V)=^iM)1%k)o?xezq*jNJwGM&)ap zT$sS46lOb42cZQihsD3}&)lHtWb6{r|A^bbPl7W#EwGFGGOVn@H+(^!nAwoYBw!_o zifj^@*)Vu0+(#;4bp(^-Yq(CMlx6H5(ACEOZ8S&o%GhnzsY}1Z!IL2{j&VC;)De>Y71xy~TpMms6!CwDCG3PqX=6ta7okx;Q)FwAxZ^pQVU|=PK`}u4=mKgTAlonBH3QtnaEw z?qe^Ok*b4Es2`rI96UDquq!w8TYlUoV;WU-S!_js)=%1x(pxcko1uMfH(ThK%`Y75 z4~5vTMD{{Yt9^DiE9~A+dmk~4VxOP7+P>FZ+y}aLRhv&5w9qUrxLI!7Zx}TC-ma0` z>9#& zkwdXKLO;=7x=p49wyTe~EOd@z*Q;q0($dk)q^cjDEyq(?4tgV_I~&bhH@2~(nJ?f* zI^IP_9+);*HQ7ws5ffKB!~IF~K50oGXny&!jziWbo`U*!+eN0fNH(L*p6EDGV6WuH zQ&Epsl&H6JLg+|vq;BRRBhdZam%_YtH2I|C9&(K~nna7QI&_DqLA8Jz6C|zOlmiLn z*nNf{i8(=)`BSywgSm9)6;GRo)9!1djqqG^!kc_fgHe0J>wix3zV>7x`<#XehO%h$ z-n9F^9v5qi--naX% z$WXV{o-~Kj=8?&Y@Vml04-JU-e0{o!~-VCWLv3`xk!uEyw>sG0hqL#N9s`L3QXnZv&==231kSyuK(|N(~Pgk-t>YP7;NxlQH zyPBr!G&j*15G=ue4jeyaf3*PIel=AAtAAaezQkl%!-2dRaYLr`Al=$o8&nl?OY{|B z$0B#L<6y|5`fGhe8e}mNPYab5$MfCc1^q+0eA>MBEp756s6d`|h zj2|Mjnnsg_jp)H%o?$OOJp@~j1*5mViw&BOS5KCKqm~zMXGvxPnOEA;L2CkPkCWRU z`zGC18DFk9$*;k`-eaB);wIX#T_Rh&r-K|c#M;pFCY;bd4}GB({&4EmY8aus`|P4T zf-QJIiVdw-#CdT_T)pg+behtO9*VrTM-H}vcYPw{7+vaNs`pp@h?q{s#0@#jD1%&o zt#ZS3enY`}u8ES*xQG5QvQ)OmmR$JOssVF<+lh^LA$+3IGGrw<3-e7PQV8{p)7iBB z?7V1(TADH&71NjYW9(DCI={O5a;UDMEUS*n)9_bRxDQ%nB7XXcZ~;!OX>d5S@E?Zu zkYYY0zwrw=MuumpV3jN}#1r{-g4`T0aamJIB!NeTE)qb{8%PU>A&2xf0gZ(LUtb2H zCr2ViG)&|dK`#m&0*)LKc>-WO*##$V;*$CO9*Y;;N7vMqp;J!Y z1Q;K?WFAGhXbv^BNPc>gYOo*A***r*L2$1f?U1ub_J|#0yP<)J^pIUsSrOXDl+jN) zC*Lia?@vskIi7Ojnxl&pw@kx^@Jn zrayKEyl(woewJ)*syCKz6vfF@y4CGV9|j`iM=8=%w+a?of&n5OHK;?0Fff-0&1U`j zBB95tdwW+89Ys+{N_utCHtc#qJqmK8jMWrOpsQSM5O4e~o1n@1sHwYlBW|TARn*WF zRNZDjW$hBbU~V-tsjSa6+3fCn;c{^2`MoNc3&)TQ#<3v`@Db9~-Vp zl#zC2mitG2p{segE3Q3J47|$la<``n2Ogh;p^!+7gtNicHx=rK(*z51(R-wmV%y#MbAP@7Xy_vbzq{(-Y z%?$M+dcu>(mQSX(TBFIoR&=0i<*|F^Dn_FV^_i?tCeV9 
z4qvxh^__?6Y))v+k>Azefzf9}U)x62%&Ug0k$%3v*?;XCUo1lhuARr!lZ^T-^q?!$ zE7vw_X7c`nD)PPHXKim_PU%u_UdHj(4)Nyr?+aUUhjiy;-i7K$g|FQcPPOTR6Zu@# z9i&mn!O}6VTnn5ir=-uOpu=X{rMVbT*>yJApBR&2PE^rcImh_TT|CS^gpJIUqP5S@ zixwU-M2GT1lT6GJqi@J$c#IZTHlNDC9hx>kER*^u{3EqB`YqK}8hHZWsIoKk#8Pd4f{k8F=E1__*fJkn{= z6#OQyY>y)bIb2jc5=${2!X}?=&rb|GxT^S!(F4q@Og+3GHdOr{IE-YpVeXSuL5(c% z4}1k2(X`tjxdwQKe>gc3dVU0F@Ny044FA}2BvxPyqFe($!#^dMMl=_rhKRP;LS+cy z6eWnI$2CU+(n}SgE`_5*#^E%yZ65bs`HR^Vw`>D5VIACx7!_s>JB%O|hY?&bLhl#B z8Ma&lA8qfsV}P3gj+NglqY;Dn$}4L7PoH)G!t#9HP0=?cPW;v8!RUZ^VV_v|@>lO4 zJ0^eeir8qTbYP9yP2XBgQ_eL?{vHq~f9{r&I(juSI09Ch61+9^AYUaevX}LgB3yzQ z&|P+Hc2NWOB6!ZS{@cjbie*2VfNYntg}9w)|$WGsS`DNV#HxOQnEU3gh{?k=^w?c>zmp? zx&suN3@)dcn%DeQ?p)*o8gFk){k}#6xH59eqcWo@F^LJM2vEwmYTi(z?~X11cP~zP ztA));&a@LK+Ff~=xwMEA0NYO=S?-5gQmn)|?(b+$q@(-Z&jh-Ltp8@aP`C{Js~jMi zjW+L%p?&S)l&l==hpe1jdG^{p_u?ON8^?_IVku{oj;^ME$e1xadXevuFc`B{) zOF^`t)whle=){*Ar_8Il?}SEDmQC`cui>4|V6*9dc|nuI-f=c0w5iB?Xl(A`u057e zezx|cbXk0DM`MJKx>(IVmr!|D+!V^YGUYh+!{Wt3ifgBOF?TBzee9OL4$0QI2&8OL z1d=c=%4Xu4iN;Gg+F<3H>6W%A+7PuUqWs7;Q_TnidafAnb|j=+i!_{}LtUT9;tptmrVj$@{{n#hEN z>lKre>y-%E;@M$n?3M_oW`*(CFj!)NKS?>Fhk2tP9=nCWv^(Y&&pWU*GGVwDf$Mb{ zW;GAPN(fv&4*sdayc?3!)`W0uzZ0ZiE-we?&A4}4 zfgFLCL)Q>PZ2RulfLW6>^wOPe-U21(Cb>*!7B$Cfb+w_APV4XU_14-CZ>=9UkE|uW z=|p06Do4IJS1cFC${t_KIy9bKJjl77+7zxo{opNKc_uD}WPO}AxY3mpN&vJ&wBSK_w5>FFNFnuL=Cmxz5Ka1MKD7DF)Wlzvv5Vg|f5;BbPtY@n)75 z{WPMD{GLIh*z)L?@O;3*cs9gfwM}nOAWP~cb96yl7C$~-@cS;-+M+Ckb{tcq zcy&f&T<$45jeNUkzu!>B`L$c4*!5X4t*omu&DxhG^_sV22E0l?zIb2q+%udwJ$5iJ zkK1)y!)?>1<9QhNgJ7A7*X(t!L$I94^{LJhio&jZkf;J5RFLvRhDo>J9ZjfN>MGV} zLP{33Y^2Efg?cdAshYU2pfJTazP^`w`~kdKx@o1(S-`lfJ>Tp=lfksD-C727ISv_h z8g7~REyZlep->5)tO`xh6V0J;*_*;hf4Nws#=M`OR9NG33==-1Fi8>2{!YvCD^q<# zpwgTkx1N4)!wmwZSwYKt6>ivN6mFDZuo088?~s_XuSZ+-q%5m&^H8R6ll?Vyzgi`A zUs)@4f0SwF1)`0B#WBtc(|VOF+<1tQ^&!l>*b~gW5Qka$zJY;!m{r$@Pvg~WpT@ft z&C|eP=QKY##z7xHjcaKXJ^kkz^=r}719Du)3RX>9oRpONt1kW`(RPu!&16|Vah|{KEjz7b*35b zDq`^=)3k1RSZp=ZG@t|KD!rc@Tm-|h{oDW_EJqx9Kc1qeKd@Z#VPF6QAsB?gd|AMJ 
z$s}5Jg}{QVn5S(d(29MS@#ammf+E8pnHHZQb-%lka~#lyg%CAQ>sE$^D1o^ygoWsY zxi^7bzK&@cNGVT_%8mAMuH1v90+KKw)8)p)e()AP*EcSysaHh!Ly14$Y&_oVLh$$k z4U##^#uW62$kgALFPxC;+!C7h2mkCdViZut8wgV`5Mo}WJoHoP7D#&mVa5A|KY+3R zL4fu+U}gW$f%Qpnx$lwlmeIihd~tPbj(tQ#)xvWt;vJ$g3NJ@SsQo3V+0K6h1NFTx z89v!h?{j}CFCxZfJ6{Q_4nb+FIu1!Ie&-ZZg^P0T_U?#j!K%vXJiQnvuaNh0KMW`N z1o*S7S%fb%TO3Yx#y`gxjc@LZv7y;5IN9SkFgRBle~bVvnn?s z#E@uj`0%H0@k}#uCRF-EGOqB#r{BI4B>Fl#VDY$*^yYM}AEBi0_J#nl+m{Qbc;M4q z8c-|uy^)M{i}Xh&&X->hRRygtSehrsj_u+nzk@th66!nUb~DbTB_@`9HPBD8iUV=8 z3M9BE)TqUG5{E!<64@d@h8qZnX|zEp=+|r6Ra$R$luVoN@J%0tpxXvF??Z*M0NLM4 zPl~5}#YNN1_uoEQWR%_7w<`aR`)YT4-ymt!$b+~-Q0|@N+=U1 z0R2r2NW^hE-f;g=k@_81<5=K6>8MJU;amKLwk$b?ctM+NdDHcWHl%#J#;81u=9ouK zDx;kt>g7p!Z=GCv05aiQwAy`bV?KM&oA?ixT=g%DVSebh&y2FT6nKO8+9kh98+8@z zVYAcEY?>%u7Cd@x3U;NIFFFdZ_1P(|W-(u`;$623BB@ND1$+{Q_uEYWK7W?m4>Xdn z%8KUP%UYJx9C?s)y#yy@EXK=K0g#bdrHwH4xyd11Syi-O;J!>wGh1gQBbCo=A!X7ZyU%}>7jo4vSQuJ$}Hj2 zlB#WDGWB&+$bFXABJAWhorOA)g=#gyS$zp5{(sHa6uN!JF#>uR)Dv2SU9pHNr_w1A zrqW3g06~vDCuL&YGKsx`W8r*xe1 zwk(|T9htPI$o2i!FiRD=t{a1(Sbmr= zS-GS|7zs?z2kVLqgDVzzw-mbjH>q?u1gUgQFdQ5@MgU8G;@6NYtI zhQWm_wbCbf!BP%+ZzGeW$IKy?AH}ew17UIbU~x5Lb+!>!AiFc4Ss^h-o=LYkv9xRg zVgae?gWT1OMz2|(IYmEkAy19-gs7}Vz+KYJgssEXRbJeQSPXNc;AE4#OIy>uc;4r> zbh+W?N6D^SGu_9ym8iZRb^%!*jUdB!^oQm@64l;GlYd@Y+cX*=!cI0&8Dp&TlggUc zvyZa!iijQ@AR+)uK(xOt(9lrP-}5xpy#d9?)lbJ|-o9Tr1}MoWw()?7si$n_r);|* z$a}jr<0r7jQ4p_hqGkuQNsenH`wLU0r+b5h5JfkM^EgE`Dle2t_2O?8B*K&Tb3`%@ za2@D*nyfsqBX9VoRC?!FWN$Vt$`rbDFfbE6OE4KaDSHvwNu71}6nt$GFFX=rbfJdr zOHcS`GJ^I3@MV6^7>~6+ZB^XTIWrje-+qO(^s2`LB-!WFE-k^?h^-z%D7Ye8K*MRF`l$@7t146Z3d zIZZ6q{yZdnUY93pq(6D6>`Rp=DKF&geoe66xZ!YFCl?Ked34X zN)8WC7g~X+h9`^K@F5N-RFsxnJwSeBLG*s^RlQJgPiI=icF9y2tX4pOt`GGIz!lrXTngO0>z6;{KrMWvU zm&*fnbFkN3ZqXpx1EdfPrVl`0b)c4Y*WGRopo*?^9z8+b?yu|rCWZRH0X)0ppv%OP zn;t#JqH~Oo+D^R9d7+X|she1O9~Tex72nTu?b-4i*ZCP4$6eAD3F19c#|N@=1*L6a zD&|@|s-29fSO)WRRp>fvj;2)}E7%VSY|UZFWoKC1dDtTA@S^h0J{p0IMM=In<o3 z__~4s4fhwMCnK53(+1iDs1@K2wLAkdk~rtAs((~M9eSw-ZotMQg*RZLYw!xxVW#1K 
z$K+_g{VL6;=igh|_s(dOy!E5{PRAly01= z|Avf1^V`#T$(3w&J2HOt=nCN!+qhFmW7u}NUtwY7TSgdX_jEe4cu2>_)RyV)blgjpfzW7qWTMb zby7&hXY29AT%4bJu5gK5o5%2~QpI{s)7BzbJW|)Xl_z-tAO4<2Pt{4CD1~eJ2;fFA zPyo@Pvpc}oAV7IxUX*UaBi4xUMypaq34B+q`%hTp#K=?UCn%@x6;1(6NXF0nwR)e9 z@Uy#X6uogej%hoa-EYjrKg;v7h(R|f)+;&^Sv}xJ2Eb;k@_DG7>u+ugg zB!4y^Se{vDgQ`we?atyq$ps6q(ND3@zhu**9|FH;k0uV`oE4#zh8h3uYzR7j=?=dF zKBm8~LN9s*iX2XN0LX-_QtH+91tq~xsFCD7Jl0%iX2%=jvFgxFzm#01W%Lx2?l+f7 z{%#2KNGoSZe%1-CZ{W(*-nN2ED-lGIT_{B!;GwAki0den^cUE~m@;}d%+?N@X@|uZ z*oVmkk3dYr@Q%81;?ZyJa+@n4#yI5ZqF2@PrMv7BKn;HcbZH1Ozhy-q5N)7-X{?J- zBjFPv$L)&Wu$ovsoZxdN2#BYb2>D&Nt9Q^I(j~xb^nl1TjeE%=L={RA>@{(Tl*aUS z3mxl+rqYyxm-1%JK6(YuvL`|b?cNf($UtkN(=Qd#r8g<0<5@i!NcTt|CF<#ohb^r@ zG31F_4_H4Zb1*zn3=8S8|NU{CB+OW));k`7-#+~{+oa* z71h8Fpc-qk74S&{va`*}701Q(6ME5qv%$+t$NWcU!|y~G$i3|Fg)-l~JWMJnlf?7+ zDtN0&C4K?$w_=~4s{3y!vl9}N;?lVJ{vbb7zQcc~`SJpLGK86cFXPdY6-oT`?af8Q z4Ty!2-cCO*{^zOgT{e};0Hj+oC!!$-v-ITAgW39w^96_Drw^)mNI zOv%jR<|)0N1U-7j;yhBCyaD9@ccg~JaeA`5$aE*Z&RIVXpc?@4srzmpjp(v~{)UPI z+XFMOfgV89mrwQx4#kEJ{=dBs#|Pcp>_S_wmUM;1bS|#zHZgI(#I;7AilfU9UqXz5 z>QI$UFn;0{?5d3ffX?{3qQX{cC~PV|p91*QvH=6WWVPwF)kbDRtmS3rryf?DKed1I6$jm-II|b=?D(xxO}_ASx0X6$WMAG&DNv=hYv7qS%*Hw0rr!UXPf$uW z0RKZvz@5FyVYGi-rro+@?8Jeh#9g|nq>;a{CRm_?aSNAvHzbZ+F5Oy}e!kDD#W%9n zr061eNzA&{y6)FJV=n|DpZ`4dl-9mv&0be40MHqA_$H3vz^t#)zn!LlP!1trCGpvz zJlUn4!mSmRBJuz2|I&+AsGc(vcGVF=q2hC!X%ASDsej(qz{G zc>_DewCwsWXg3KT&bj&S;rZmykCCCF*zq%gO#==mtipbfH!tlLd2YX(B*8j~oVOc{ zA@~*S7DlkaDCgY_Vpc-^a%cr90jr4ow! 
z5eN7Q^wD@TsR3T*h33(UBQihyp&@+t;GP$d0Z<+Yg%B+PccaM{JY|CfMsK6YDcHz2 z_!NX|bIX5D$FXLlZOB~5w2fr>Hlwpbvhj2cp_<>SqYOv%YvBrKS=8TRfc@Zw;?<|) zr{kD6pdlf_LHzgN=`--xf52eGZTIP8b_k)}{jH73rQCP>`)3DS=R0F%=R4=|vGvBT z&t4ao91wkB)2iC(@0UVAV($f&oA|gv2zH|su(H{GD=B`~zW!DvT%MvnqyKX@en6~y53>2bC%it@c7`KYsws`&1 zMmxCpsms9V{rQIbJcsqsO7OpZ^59QJ*eIV|N~x^+^Mh+&Er|cRq?*atJ9ksjajbK6 zH4O{)vW^RGjdbqMPNJsFD7kSdd)McCOphv#9hFtr z#|R=w45wvy=T&Z0kAzG!i%KUR79LEHh)u^tCa1xlP~zTyF8UNI`3oVg8COoGikpF2 zPD3g#C&o-elu700G#d7Xf~`pirdV*#(_F;`b~Sq<-mk{1SiIJ7}{Bo%7B z90?AQFh4&6QFuB8L^&kXI5<28d|DX{AEdVE|332v=iyRbdPq)*hV{TWR5}DaIV7qCGzkX$ z-zwA}awSy6!g?g05jFz};4$a|2xZ}z63{dm@M~14zvoJ9iiK?k5x`^71rW<2aVDTy zFyK$AP?zLN7>kE(1{1*J&;^jmA@L`mxzbku7eqX)CxoCEmk!~B9Fk}PTAwARoOuP*~Z6Xi%kzJDJcZW4Wsv;`xZml3t;CQF#(LCfv+S`;P)N0?m^K>2MS!EQW8Pk&66$W`p@;aVjlt{IVK5B4=g>sMy*;Amz9dE>q}9QB;Y2EtrsVtn=L@X1 z*zKi~HGm|2l-EE>(9xW^5W619|EgbUCxQ8OVe1e>Bzl*%# ztAnl?Ly=)Mn!M)}Oy#!bDBJbzg6e7F^=#MvM3|_p`Zn<*4U#nVZ9dxR=Lmo9!J&7n z)~PquewEhldglC4t>U{;pAHl>pYm=j3oo9O)8bzJNqF^4ObbuJl*29KEQr6=Ifg|m zdCG3?wU8;Lhoe{mtbjyj5U;9GSl`Bm%sm^x3W%4-Z&YC# z8!G!*`5=xRYjTvn<0w}4XUxR5;3if0&Y3cqYs?SvTgsqTb|ht1ZsC+rdhKY=aOD@v zdbD7M6%Fj&+pj1dPU{X%!7r5=j6N*#Z_0m>u)w`>M<3geHhzxsUs}QZ=zy8h{?_<7 z3yF*VS6Dj2z8yX7V-J zoU`v5d8Cn`T#5f*3>e7_RgOaaSo7Qom?+I}Z#P>st{&eq(Ljg{Wb)MN!1%QF;42b& z6_tDT^@OGF?+i8ID@Iuj?R$0{v{NJRzIN1{j#H!4gEa{i(|dMJ^i!kXmgpGcP)Sf+ z$>OOI?aWDTKt1@Mv;|VPYPNZ@OP zmn?Nk4ftQ=IZX)+1)kHi)PSj`e2_@)*}HNeXrE7w$f(0|{y+x~Zk^GDVPPd7@BD zqBB^E!6W|@s(kyOkQ71$8_2th-H(Q_OL>#}ziytVMz--w-7Oz#{_`ao^FLwmsgYyG zJJHxz%&1A~FLmHg8pc3gbPsfZ5O!N?S>QRAx(`|F!T40LJ0@oVVgnDTJ?_v~4Oi`_X!(wn1DmhoiBT8Bs{cd7m4HL_h5ZmJWGnt6iY!@^ zE$d`UmMHtK2t^2CvQ1Kqr4iYeQ6W3ovM*&&w#hDgBgSsVzRb-1j_-TE=V{J;&%6KL z^PV&J-sZ9Gla(@S+hohKcwLFF)%gGstamitJ0&iB?UVxyientwhKd|1Mu%IFXaY0_ zx#Ss;AVRxO6bW&(#ZQ5GRNNPZYWp*%z;}Lwr-Wg(!Sm8L=)c z`Z-5?Bq%5DXJ!B*L@JHQi!HyhJ-r=2mIlSy^pjvN9NQ4bXOhu8WF`*fYi#MQA0Yh6 z^02%P`IN#Cq(~NA|5iz!KJO@p}d7Z$?*<7FJUHYhtTJ%dn8!QOUJ7a 
zU$08VdnodTQS}z$bmYxLNT-*b;SgH=-*Nk7guOyuhWKiy&`?)zB_5JU6<#4hqYKb^ za8NHo+fr_~0#y%}l%Vo-65{91KSn&`sk*QO#DzV?z+r1Y#C@P<9ZGS~AhlwYauW_9 zr10Y`Xq5!f9YC7X6y1|-uajCciIC6OpQIEh9JdOJ4=F*dp%m!gY8Etm^W?9wEaoQ0 zN|Of%#Tpov+)$-4^zBz&546p|EFvJV>_Z_5DMOD zTXrvHv++6UJEUena)^ z9c>?X*DfR)Gct;_8HSlNGCpZDRHwa2L*w?<_ai55)&C9m!?eF|yd9n@o?znT9cdEX zV0ok16SEcWvhYaY(JLF8@1@fdD`FSE%}He5E=@UOaS4j+>DA*MlJd3A!$WW0D(^uI z3=AWOh>L$AtBDEaxpJZ&S#p4OxAmpC+}A%MCUbvxhOgA}e)-5mW5OLW)B9qQG&kJo z6_}cCfY5L|TKhshe^=K{h)-T_v*x}>{GRjo^*)*5Wb&-!lb451HGY#!w53MtCX<0M z>)-yXdNtE|y0q&l8%au;(iuwW=6AE~)1IuTXe7Vp>`2qN)1mvxD$C%}w6gBsaPmFY zcjUaE@3Rf!5u|H=Pfe>n_Pnh!9%FJy-577wY}n3`p`9MhdeGR~(mYpIKRLpSmAZ~J zwEDak{Ljp!-15UEnGG(JO+)P63h5z(kfw&h5Sv)pUGnbJnpp*gb4Q5x#WNvrpYN}- z+DGb`!q4Hd*Ne_)@)_TFVl5!2e(6br&Kf_H;Td_RsPhSI;j5_;@?_0R=ButlUyHN# z+NH8(-W{yHlHD4X9BMwc(BEHIvUv`5Dhxl+54aF3Z!;|YC6Cr|<(b@5Ozi6la>9tW z-G2x_v9qOLhM-CpW{%+H*eKo)7W^%f-N*>xY$U{cQ#VKEnt3tr_Mw zoc39E!z>olT*{Jj={fV#UHW*%RTKSc3q@j0ts>?AwYa5l`@Q`d5zVqQfAmMzUP@<2 z)yC#nnuzufJG%^jd-mxID%V1l8bC#+1Lkfv4anYCJe5IZ0yDecJPVH8HNpnm|hghm19Un zqLXgvk>5r|Eu9CSG-{#GuBJktCxO7zA`K~d{ULvYmNWlRQB+icF1W@5B^fsMfu}-3 zpQnMMz&!)4Y;&L}@{m8h_$F(XZJXAMx^Ar(_o>(i%0$@*-UE;CfFcc0Y`klr6%MZ9 z*MvU%EvXfW>J%d6rCG_Qj{JfB7_FUG7_Ap6Ab=bA`~h}e1qNCH7jCfDfNKiJ4c4qu z5a2f$iL!(6WI*_ zDNBLKM!!Se&9md^iI z@q0(cAv|9P6xex9X<4pJ84|c4{OWAy`|i7j19-@C74hL_=aq_Z0jB?GtbSYl{v(dR1}mHLXokPgMK}ta4#XAVRTes6ctl%Avk&$`tdVcll6|+(JSZ)Ua6Yqvl zN=+Ob9v!pTs@xATGM#Tf%6{MdpJ7opVqJhYEm2yM{jUEVh$Twpa1X|1jLCpHbHESbDFyq54mImw2D1C&M#o z;%A~i)JknL6EZw*Y({NUJNfKc7UIU&8?f(x>jLFM-dwS5PM5%~$C@f&Q>mgfdS88h z*X;AJN_KEmPCD$N`S7$3K|R<@gh>8`*+;U?3hM=zU;9h`jM+C}n^DjU-UO4ONV;q) z-2rY1# z?3#*RHe}&UnANvAR(04<@{A9Vzt0al$R1zVdtTAg&%pn;^2)QNcd&W^zM)2WUX~_S zqoXO+(_U;|b4e^|#1Q~UJAqInLQK-NA1gj?t8}_sfO5`TLd@p=avIWu{FTih@2YZ3 zD2{O74zeqRHYP4gc}`3Od27IoK0ai|YS9tDV&qyvf;xrm_q{a8lE<}&b4A5vWl4Du zQP;}E$E5N@rQIj!@A+*+xaQ8$tY`t`O}_2KxAvo-)BRa6_Pat6K;o8bQ0#$})FqiF&Yc3F> zsoYfYjbLIWb-C6mql~1L=TA2?>>+=`F 
z-mU!?X*B2+un(60CeLR7tY^r@hP*R*)2rUEZYJs7dXai?yay9|YMtP|1Ct#dpy;d| zZR(K&hRVL}H;dE>?X2^qz-bmk%^zpY6WZL2Jr7jZHnul40{F}WgZurC_%h-%6XfI- z+#48nVB#2WNI`28TGb=hzUyvucQ03JS7Clp_U+1eA9-cu@4($j6E*_9|G)PAg4vI& zUHr*-FzxWZ$6`f3@}#ytjw-du)}0!I97a?hoeig(Nu_Wrw9~|}m+oca4#KN%p_$8+BHk<=ZHiK| z)3TEOiIU^a1rVTczheh85@L0N;n&N5ug?y6zV={w*!J=20*V@bp@HqqncBCLKA}e= z8-$(|-T+4W3oYj!(w6y^6j+k^QWGGtr+ZD%LI$>hFw~oLkV+o4{)nE=INjA z$89u$&p$GfvAtQDxX+|k!kKA`g*3T#x}dnMl*~c)5y@X1D;FNV^bP+CQtn}Q)3HOH zuVK93P93B_VQG9{*FlKRQx>OA_-6OP5%)ejO!`~6dO6Em#-gT3iq^UiFHSCgOV9|&_8LqGb=e5Myp#(md8-=DcuN5XybPT2C#x8j_Q3bIwP zHEDw9YJP$L8>2VD2JZw{2h3$h?=SjAV;B4!-(5tAwBTn=NiHEp!&5bP3gwPo;aXpm z86TI8=v^KP!K4Y#qyPIiYREY8+v3?yeK~xR^v!Xe z1{Qr!XqY9#dQ00Wn-Al%CFZ#O&?`@SLU&=$diJ`6a(z_kRM7QOa?1r9gm>fMVq|T{ zH~Ql1>xR!*CZXcDt8~nJ38xL(y^v)t<{>j@V zCZ?x!5Njf%=Na5Y*vqt{3i(JMbF(}}45wb|rB$y7NR$7hB>j4sjpQ0ao8QbMf4=6) zo*zrH{1t6~%_U$u|KESUn5hV{q{1IJyjQ%K#fB0kKQFg?ckj@s<6@^Px@n~Dh{{;R zUE~Xvy`Lu6SbK1P1-kHQnD_REy<~P!^DdHm$WSk;YZn!)z~>)%=s(x|M0-O!$j8ge z#>Tkp&*z>RKf7vw)qqY9t*D7eHaQkSx2t-?T8RbkL>>q{_-y`Js}Bft*<>#Wx?R^B zHcc!j6?x$F;PWUDXh%)Nu*v-==q9Z<{5Y|oPvk)ku*lKsyAw6>j!iD}+7m^+;TOR} z1?$(;=YOrd9t9$usEIf>)>kF4kEg5Bujn3=saWsxi+9CcI9(Z)1c&6 z7erVk?ovB`)Ed5W_GcK|0F5C389go5MC3IQ)_eE)KY!FRRge1lnr(nqke^ZS|9`V{ zml_8wfFPW0fL@TFMNjK$B2r6)_1%4b{&;n2Q;n#fTF=h>jv8mVazRh)dLq*FLMT-= z|5@Z)wg(J?{G57PQi;fiBCH+v`4c{A$!bO+BiJ4=3G#F6X(=QkpNp_A+~*(sq*X0& z78%L*fLV~APfrV$h*d0imFz@UvDm5X7)O z;1uMS(!K4Fh-?#KeG8)GXvyA*LdLQ^;1cAQVg2v6Ya;Tu2x}gQlA~1(B=6WBWSn8U zqM)bcmGEi~U;$BpjZPF2#rEK`AiuJnRzM<>0I>9=t6TI9_2flfDfyJvd}>eDr0A#F zsjua~;^{Kr;WTrcSJiH@>^ov^UdUcynXNI=8sTzuTY#hsbrlN@MX6dwNhuL_ui3uo!mR{A*kb+(K$D8N7rKfoMuLEL^bF1%|^{vA@_FxKjChb(`3xBWR znShe3w#OWddTT=QeQH`-EU%GSXZam_v@!h!%@UF|9MYpj)}7Wx$Qm~$z4gwPRqYWp zX?9<6V5Nlw%bQhjP}}a+br)Je`-WoEd^huSY?~@Q`zGLtGPASUV&s(y30+g(SxGj( z_<|TMX#eictoh^=%egNlYx6M(ba&Fle+r?m^odjJMxhk;A%zt`=}x|E=&*-u#0TSq zn$;=CHRMfMSL%>by{VZ~FDdyWlKZWbBOxz2;O#C5r`-%gIwG^z6|)wl2H~0SA~TMq 
z!QR9>PYHJlg~QsP$p!6(PrLjrvwIJ<;ai)PyZkrz?rhCYVmCP{SU+;DLORs?tuyVf zL0^|VYi~ufZNn20--><*HF|Lw@YTT;Q=K9uO8Lbz1z^cD**rJI7ne)PG^0rWjr!gD zITjB+ik}vk9j2u0OO-)$s|yX3Rqj5>VG{asSxnP%yS0vz71*6W>p^_?Ns(&CdgUl! zJ7sdVuf3;Ua*uRp8}=>0_W1Ys%!3dm#)GcySv($(I66FNTx|7vMCsXX{qedTvTp3^ zH^&^A7tl0siVr_-`-a{$0|FO3Ov3S`?^7oE#veD2NHLRff zohsE7X_@yhr0HS%-6Yt1Qi=oRK2D)jBmM|MdiHq+=|vhd;y{c;=1%+$V?iY1t^+Al z8VXoPG!2j-GgNlsUcLI4%x7ckOt8KZzRJRvL#;tfsV+ff<06gO-1aUCWmNNDuckl; zm$m!ly;hE7=QjT#p3nX?jGtD%vlqKYiNm)BbY$aOvun#&kG#AtLq<P+<{LvoE7SqkYg)AolMrG)kIn z{bMPV4u`U*)(a^;#jyGf8FIunOy!&Fi^sOP6Z=g6TH{*Bq&A?r^)1LN212~t@arr; z8XhEN(+bn_Tuv}mychAcyIH}q$hLvYyylk<$J0iuOYvJh-cfWf_0PPl_gAvREo>9+ z@Z*;?B&Cc{2kZi`7z?o`QWw%f!%YvXej-E=yoFn?38Dzef`dP~-PuMfJ=xL{Mq+M* zTMb)5IGuaV`KLph7|YdkULFy<$i&CzYY9+M#y!$+=or*wYJYc%YAfy-0;55P5s%~6pCdV-WwoUQeMNuxc_%AmK)a5OJ8SHm$9L4A?XBYu@+_HT zdyMe94vl$SqCf{Bo*K3V(B1;#i9vp7JdHgzh;$VSB6sg*+m0cEJ{&^sV~{S4V976wf z{iBbUP{>8czIxqmj9^g_Wip20k8Ajr5&u54%wnO%`01JPLs+Xnc|W?0gqS=+tdJct z#!8_5GAPqx2J+i8+JMrJthaoXiM`HLGqx0ZKU8c?pyuHw)U(=}VRzi>B?~!_Nid5Q z-~C_AwdH*Q{~j1q*7MmIGFLMHNkcK1*HtadVmqE1j(^pKe;7>5TSZq>R7#$Z6_Xv! 
zTq2uITvGGkVV@oe-qg0%Q>lu0IdiT$u}S4t;V_{yc~D_RC@HGs$>B)VBc=$&<3fX` zK<7@R%e@87gV}>$%SG{%-uf>Og-uK5yG0}u-6X`#!Yw0VGs@Y# zZde`=CW%7TKxhesnjP7;MWiA9B#1V*8#-safH+$SwL+IXS^FrX!e3x}(8jdWQPU?1 z)N_~}1IjscaE)IYvy6qbsJ5{Wp(=wr0C#*V3V8 zyF;jm&nV@{_D`bx*A1be1*YRpvq|6F@qm^1pYLBe%^wdZ`JQ^(&{+GPF8Cp*;#rnB zE<);I*Rq4%qn1^9&ek7497gGh1YtEQgX|+7_OS1)HbwJ+Nx1y5=1 zp`C4khwO(TI}#+c4UMd2q#miPc1lS&?Fa2tuWg1(EU!{KDBAOsx#^-w!||2S?a!a~ zx#(B9lYdHFJ?|v5iIM$s6IDIJZ8gX>b$MAPM%v=rFVvJEo#0{%C;n$f znTO*{J1Z*K!u{H!!^Js~=LPRNoQy+tz9)@HydB1)ez4!;7`l;De^VS`O{LPw(IYL=$CfKL!7?NA$Ms%c5Au~4&)j%ib9$}kVcClu1IL9t zg+8+uAET0^nNZ&v!C4{m&4wSgPHTU)^oN>J>2{;nMvLltSuF$H9M^f+dDQKTCDfTs zb)J8_o;>GOU%$}|*Z;{`u^1R>W4VI$eCGMJs8aC7b8X*tUEbt79zJR9badC`%eH<_ z<(g42UuX@}+r?Cko+U4S65yY$#~^!VZ>7U@4W3o_=Hx7NR3)1;xGGwE&AL66@j>RS zlU^qKtE%0sMFhO*xTG}B^XF>!;3r#U-!AL#$%Y#ozNTrYs0RVj={YjAf_M1tI&Jv$ z$Vj_1{~GXf=)`yq)#V%y+C-@y+57KgCVXd`d{(Qu?mINt6A+aXFhovPtDo`3nAtvx z(x;5`WNTJovbkM1_CMCmH`sL3xp(xFqRpkh#JzY}x zsu+35V&6bFE$UviV*1+Y>Xs#rs*i)dm^1sj4yxroA(EzN439b=}P6i)mVxv2z5Nh?=<+nWmK* zJ9qwo$CMe_M%qGAcAR!O_g$pY+ zbd${Vib_n4oolG?A)^J&%q2i9-J}tQ={u%33M->^lad%ki_%Q326DTLgv`w43o9FR zlP($-mG~Gt=Y0?@&N8(k0+Fzpxmsc65fI%kDtT?}EchncO~lMxtDqnn0811t&H=#k zx<#*>nd=o+T2r%WbpYlUQ!B;1?v*H6j*F&6C7*%W+h{jYGjr2|f+Ap+BwAc-YV`=1 ziJ6&O7gkQ_C0(>ADrqowUPy9dkc@=GGp3NUyS>qHt&81}@yi=mdCgQfXS{|-a*$oQ zFWH~f%Y_G(e^scr#y>%RY>!5TTrofYx*y^G_#Q~p)-jZgu5gkuf`S;Je!VpR0| z9P!`pt4LTccidREahWRadYsJ_^rAgGv6?-J)u`2kkr>Owi*)6!og=HIqhsNtN1u_d z`@Z~W83`ub=~afN*{K!_)=3FGE7tuRJu!Gl9>Y-N(77rlD}KAF;I}6xCNqac%J7Rm z36c5VJNU2qeHO&luQN@vgA&uM2qnpL#L71j$dsFk-YfQq19=>_j90Vi(TF;Rn|ZqZI7IDN7<{NZ2idwL z$3T4nKiT0^~AE9b9-!*pN5dAy^$GJ|eaz+I*qA{;5BBLrrWYGF9Ld=krM=+rr4 zigG$`cR#Zr`kWJHd5JE*Kqg zLyV407Unp_V*+)sYLmgjry#Bs96ZZ|nVlMs7=sa)Z(xL5tzR7sAIBq&5I4^e(>%kF zSX9iNV9BZIePurO*W=4348*tZZz81@gXxKan-802^EL;swj?xPz=`RU3$z5n*m3R| zLvy?3d@bPzoomHNuQH`K2cvVUNG~Jnw+Dt9dC1N4AkX7(EA+@+^YfT}l|cR$hnF{T z^iOBIjzdyl=ZOIpfmcEgWT}b!jhC*5z7Rb{oY%HTX0M&;I##`$L_@5i4Mk=L1R@VQ 
zkWI)B+%E0Mc`xAUXU1$c!eY6K{ph3vbRfsh&o(UFo52ycJiZKZEQO^Zho&cDZ;`ju zkuqx;F}LQcxwgoullkqeuF-tRTieH%SsQh#)iVV>|Fee`JNRMr;MZ`^>K~oA^3T4F z`D0Rq+x1-(N8Q37Y9M_>Qh2>LO6f2!Q8TwQ75?($f-{ExlDC+rqw3n$R*|X`5_fE$ z6YMc>UkA*Q&(&H*jQ^`Y;~ihb&cb;QM(16D(M%)Yq$*4@ zvhJTGfk!Ma3~#xtJ*Q}XhZb8=nd8GR$w`NHS_lgKOBOXfg_ZWzB=Br^pF=y%EVTLd z9!tYfhjrojRP0&6Zl*yyg>nNn)FzIhl4Nz|3~0)zwtr=_xpV*lKYMW=2%XY7VY%t`<&wFt@$&Udn(_3OMZm_cSa!G zUmcLjA2|X~j3pD=sqX`rbvy>Rj z$fELxDkk0tcym%C@T|hmsdOo+(qStKuze6pm97TA)7Xj) z^6}LT)-z})Re|rm;@>RUF>d#O+<8ZRs__gQwc7$R@diO~d*OJ?S%x`9j#e-`S_f69 zY=+~pkR-#&MCU|oJBje>-XuXkdz5L-0_!H zy6yx6p8_Yw1vTreacb0PC#S_%`0?=(;K~yOZC2n?>6-CkKs)gZ~tmc`2xK)2BY z2BU6sipi-|DqY_$8pOaSL2FFLpI7Pf4+ce5E(Z!_akT<7%?v7CEyJKlSOEQ452Vzb zlgvMJvLTLh)NTj2F`}L5xqMusdMuHL zv&~lpe-r4`tZQQGVW+S}75(qNDqaDAwa*Jj;k{TQ>TX*Qc=idjSmNv5Hs7ZE;OvsJ zy`<8`06UE(@_Qb#aw>u(wfSD*;T1J=ij@8bKtYXm`;5IKz#Md*3QJ7*@ZGn`)CTaU zxmCJ)Olh#h%2L3xRR=5wu%tb$r?B3>U^^%{&|fIIFfnAs_Fs!#QS)ofG%x$vHYqxMSxY}H*~ zQ+ov}xWAL%eMxozbX+PkdR~D2yKnF?!3L=V*ue%0KwBBKQr8Fowm-qAO$r4&0s1Hj z)B^zPtRT?(agV7Y8EpTW=4Mu-hV$t84`Z+J2a!P_f$m5U*H2d;Gjs%4JGx+&a~+Pt zuF<0B`OJFX!OfirJlX41=y~5Jpx*BbX2sV8&_SU2)*IM~P4|EWbPbM@O079d{PAv1 zvE|8c9!w0_{s&!Iz~UDG4du!M1rJ3wV*VzGQfUpeRG8qXa*=cBc`?Z`t+hGOUbjFQ zizR{G$^wwP1YO6tcY(?@9Hz09Z-NA}n*n_8OTa;=Cus4Pe^ikQC#a=-!te)+gTUtS zDjenIXA=cq1x;ZT4E#-w0@+>;kU6;pV3bc_m$fjlZc@NkKY>N?4Brg*f z=J*^?rxXEo82IjB4g|9ARm?1WfuLr(4j{F^LvxCukHKvK-}IX-Im#~R3Ya{DvO5Xx z3Shc`I%F5npy!2^fuBM)8kX!ba0<9q0?R)EZN8+nT41gQkY~fdG10FKW{(8nCD^aC%|SP2yMk`y zd<)X+U;_^>N5F=y`JZDmIeXQT+dxs(j?|;I>fR_1U#R!IVS}<@kfx!z*U6xLU z9cnDs^h&Q*=5=m(e`-B77!G;CASecgh3BVnC9|*bvEBGNMW=6?VAzXm^X=pj`|jJB zNdpF3aQD@Jpa9J7NZ`lMAvHU@YmI)jMLVauC*4P<18w7U~PH6^U_+xxB*nr_M z_GAcIX0@oJLd&F&0Y~-@Vffe?aAVdPt^Zv3d5#euh!t=X-oG?oI#$l3t)@sl0!qJftA}_ahjUV!xikJ-y`l_~@6Ue48=CaJUj-b`?k24j>adPN7cfe;44Xhn+lcf-2yR$P(l(BMq z$ea9w?wiKTfQVnKFv$yW~COIIy?>BAj^UXAR z)sF49lj9(=0`ob<`X*6lAKKfc9PZAF1|BwSKnFs{P%++{$shzh5b{VzqUZQ9sYD=L zRuHht=BVmld%vxp-VcA^;_0%{xyefen|ip3xVp3D#M+rd<9|P3`c^ 
zTqU{m{<(g1@r9$?Cg+%W%TgQm&7-TA3odxDlwBykl-p!7YIJ?$Z(-ukObem`cfm`w zodG@j>t#tw$u?rmV$J3*>rSO>Ke(;yujf~93G6?;h->-Xo0aTt?_nf5(>x}6vDm{n zINiV`n4$jxsp(iOR4U>+744TNRIUO^dRSadcLq(8Xui!8GtSzvB9EdHGd??$V(A{u zsuz-9MtH1r3(bT}ImQcL88;rtNr({M8Vj#mSU<1W)+0<8>mj`}*nFPH8E+0!=TV~b z^uozDHNVgDi7=(~4PDw@D|{1jL&JTFq*|)GFH%~F2#{z}|2iseq$%wqAyp?-= zF=`N(v1Z*9DVSuw+B#&-=W8-CUUl7LY{q~=j#A+K#ji({R)U_ zgB9zcr6zyV^h|3mTH_g4tCwD1%15`To$u6Dcdv@yh>`tFyLj(@njwgB{VlCay!rijYfD>> zW!j6^Bf!)J@C#1}&l816K=_|=LDvN$0mPO>;5qyTY_CQ|Ft38=K#P@JRyQk6ix^PAlP2fx;&u!+9s|?1)M0hQCEqa1<5#5G4y_x1?5SkFRau$ zxt=W$xd;Rc)IjhU6eN)eSPzB)Yqb;WGYy#{0zjU38ptzGiKf2|YmW@so7 zkp|^)rW4g;0?6vaq7%Y_(EokO=baOEzLR9$08Ye-J64X=*?ymYLg>_mfbIx$j9qPW zcSK;?LKPx|lPP$cgmjm=NgKq8(?l}~f^Bi1@^M4-t(dS8JT*8j0_rmkdsqXj)j=qj}kl570adslv z#DTiipz@jPgMTkqVQS2Yf|=)d?fMVPc}PAQ*V#{rCfqJEI9+&?E4Ta2d(jUXMFxzS z9l{BkMFuQ|H+6Hnwc50zCIMe--mU5hHMdFrEt7n;KDtxgpl z&|E$y6))Af#1+g)UltH+@}z9DIe{5RSX7pCBAJFfW+jGzAGx;ceLXJ#Vv#Bcgur@*K_2X{u3@Ao0{QF_0`KKD?ch zqPWF(wCc@1`+Qn!4`u~r_{Xka9?5Tir4|z8f7FGCwbnsZ+2!S{6eF)nN~@1xkml?# zzTWlErGI^_6GI6@_$-WV2YTGD3XOnDBE6j&5J!s)pC@L2Cd3lHjz(S5i`-AANG5ChuO#`9~`wOk2j~8&B2CIU;3tSA_k?db{Mdz59Y^ zNWJ*hkK*-Fn?b)eTX|T?2UIIIFN%pNBu<{m>t*_nBNYgT*|km6o7hbVKfjJ^RrrM{ zpMlD~R($uZzDasbL5&BSuwKN|nKm{(z3xx5LwuBD*a-0&9I?ByeM#|KUQ2Y+Yz(GQKn;g0_vVJ5y7BZWsAX|kq5nRnU;BR`tB ziN7`Uv!Ts0-_j?wJXL^csR*cp+mzys}(Y(L^@O7%=#h;$Ox5Q4B zr98^u=5^^*)|l9Za+y~C(i`GWY^jJp*08KnTip@x0blD!2bM2y2d1w~v627V`Ebt^ zpNwvbo84a)FY@h?4}Aweyi(*V=2E89RJq%TyiDVA#ojDqWojjv=9Bx(C#BkN#@tH{ zvZnY^&h@z2TFT9ldZwm}I40%7evc;0i$r`6ASUe39Cg5pd^7OeA8i8fXH6=R_+=}o zM(RE6ObAMoJ;qs+4>qZBS!sp+>!Qsx#InesPYw+d{Hos=8}N(&Em^Dz-QMT5do^3@ zRTK?}(jOq1cRXGn%ta0$nQKm4C5ky4`n2LbiIS1QB`)A+p)#J`@Jo|jN4aS?bQhW^nr%h z!KTULP^8L5TzIk0>hMzdJTb|OUcEdo>Wb3{d9(=AUE*B2E!$BFE6(S#l7OTacWj+TX#=uv z)F0X0t)<>9!SSTS;QM_(lLvz~c^^Z$is$za*L&WKLlI}a+Xj`9Vj&%@7U%?4Yv*6i zPe#<{6wN)>JfG}-0Ypv@TR|Rp0vdVN#87S`Mh{2P28P7mBB=9W)cA{?(U(_t&<_(pViCg@bUE}a*Ttb4!$N4>4-KIdV}qtcab 
z+CcA-4n#k;FAFv^qMcZ6yH>TQn0t<6__(9xGh~9@2M3Potfh#U!uX1L@cCxvAj&o?I4_mMCBph7Yw$*OyRGnjv|R$j&^7 z=tQTf;;RZrg~);uyW9gf|HK1!{^rvsuR;Ec-)OMXjb{O224Wo25qNCwgKRjaPGKwF z{+d(#wj>5v!Qe?&jdEJ0EBRT}o6wtJmK+KeD7zCxsgwpjzU@=C&G#Uo}2d>LN5PA_L;}2rU8(4?K=k*9Y$H7fyjQ&5f4cfa?o9772_lN_hePLpVPohAV(IBNZt*;!(L7jWec+HYMA)ZIZ6q^rv9 zbG!8tm<8d(@Z&A#fdT?7`Q-w?^F(zqK!}3g)IE8TnhEftQq=^SK{GgTyCiJTU@M#y z+I-gz^5+!y`Yh%Y{c&VNq){j}+9@9g_&c|N75jO#ldDsaCiW1ZJ`|c$TEg8qb=l-Ryb@Xs8h|&h``y7W z>XX;0>?&R8cK|z{31p!i>GkQ?0_gNSDmaRJ>bq}d5*60lAMcCl1W)3gD(BJjPd&r% z@QK|yMYQMag(pW(0r7VT{-hI5;0ef}M$f15xB060^1xAz$0J&qf}mxpIOwq69m20< zL9)wLm z^!)&_wl&l-HO2tZbI@;NYRu?)b$yU=KWH9iUV8MrU&<>y{N%q*#<*$F^ShcLb#+-d zDl*v`S@-A$9HjtyVg3=i>h2~8{b5J*ZXD1z0lU53;^ZCbH3CmuDkFNn@A91Dj3YP$ zYlndcGj{@yS1HIYLlm&(#hI$@r~tQ{zV0UrpcF z_=6i>fVFoSj$+0FyzQ;uu7>V|-j5px*aQINg961lMYTXbv8B|jfbhS$cq`b}hQNdH z0+?GJz-qvrfNq&*_@blo=^PwoWB_VwO?f<6478UE*_B>$!chn~C_uhAHI`VY+~ylR zQDDSsN)4P{q(jdK1%dq3wk6U6T^iseD@{Iu@iWVy4iFC*;KliXFGTz`Tl^y z=(JK{iRC9RiYu^fzNCX!;>gxCuv)@OAjw~$bBduw9B@=N3hZbZZJkG=e&>Ys2h`px3AERw zA7B}RK5exbjM)Ka(khqFdtRU$GzL0oA8dt#@b)=H%JgH<5=$U~%^+d^&|H9ieiX2^ zfM&iKNXhCyI0_~OmRw2{$YNZ8j*pEHo>K(3SM<`bvuK%1oUie*PvuVr zu*+kYT^vzP2D4{XTzd(U@cz^ad3gWQC>5BRGJ?UDLB;h&06mtg>O%e9KyDRR({aN= z^2xhXhL}%(Qo``DihIC|BJ32F%NOhiF5iI7x3{oCKw}2nB{;Q|fg#j^kXdnCm5@0g zzzOfS{K^jo!c{Qbq6fplfAIb%fv3IdWRN zqt-5(7Y{sff0+N}QH|J(3c8U!CWb&lyiA?#eC}LMc-Ga8iyt&bt9?fsKZq4l*3?Jo zx|-WH=xb}_ZL?)c9%Nh^{aQF1DOYBVnaJIY`uc_Ujyt>x+x{cluHw!?|M{4@ADI&8 zJnp(xY$sGXZ|Ufka%}i~Q&YHA*!MFo%cHhh-)-z^W>v^*!C#je#xE&;@vw~aaVou> zP2m7dQC_9m%oVl z`=4ktL$OakIe+}XVLc%-4xg*Z3 zYyIS`jyUr@>nF=^$5J9iB&ub##hJ}cVq6qGZzW!dZ08U?Z+CL#6g~gwg#X`3?9P+e z6(Ho(6V@fb`hCJ`I$?RAuy{_ix@J>uO?|?>@8XZSwe|`3`{YXc#7_%y9X!P#!GH0D zpaO*82|*1AIYNrWAPpe2PdKLmVRk~$0>b%(pc6Hwj}hk&`N;qF>rei{Gor?4fhY@z zjL!k0^n_rzXqC&!VXYPY=#zFX7a)pG2yQ@hpAeT#b9p(ew2suZ&pF0x=Uz3<FMjIY;aBt62g5+&dl#nABP1oKz5XTuKUmOJjNIp<1;y9AFFvmcihsrCIHJlyeSAphAF z?Z*p?u1f7)cp>CP_-&%b!Q>HG+23jVd@Iy&xhA6De>VSlXN_h>$rp2aRnaEd zfAQS)JbYQ=pR{~aAyuyd1GK@ 
z>28pc?rx;Jd)f1z=l6d<9Ii8S&wb4`bMKxx`vIcU{Rdw<2l$h-{TEE{GO`Gl-zbfw zH3SBpYOEqINwfGa6VX_}ghAb+OuOEE(wKLE_hH|WvU~DCz1|(m>feL3HYwJfxe@)t z6{`AcWr$50tHn%>Tu=E9?x&ec2UywL@HyR4^4)qr8M&jicBla}YQ(;n@|a$r=6l_s+b1=m6PfHpzR|KErYB5HDedxK>DjwfgsLI(3A; z18;gBs@J!=xeDw`)Y;c{sG@p#FH;=Ou*4p-$^My46V(*9nw+U{+B`3A&7l5eotyZx zIum{rZ9r0wuH92uP|a6i@WE;7uC%^Hj*qu(k<8(~_N9I3nTA7eQkh@0WNQ|y*V?op z9lKkDfsd9;;HjuhM%>YLgJFyCvS)flqpIzGU3i5y^o+D zn3idUnA;KS7%?^Nn2&c|#IAQ%j@)8U1-9bHe_tR@mtE{NlssSEHr}q+NRXa4C3%{4Gk8u!mCpT<%Achq z!O)Lrz*LVI<0kaI%>>IIPHnZGwN06_TvfN?F1*tOOI86!Wz72?~a$7_^h+N@}FRC0r(j0xf;E z+&oLr&@5%i(y)2}Ov9a8i%7qQ_$ag+kVHwHq*s zZQp5F4THXLtwo_UU`J2|F0k}k^-9T7UGYT%rmOApQ zO}hUYYM|C`AS|5 z(ps#@$+JSV^aO)EKPH_hmO7eiSdk7T><>HancFFEj@d1wceWc1k;L~c+(DhP>(WgXIlig>tQ=31>98* zgvA|3#Ai^% z+W~$|4~ovrn1EUdw9DZ)pqWU~888A?Sfdj3kevXli2$)U0milSyBF(nII$9!e}LU( zevthQ#d@aF1Jgwa?2-@E2>}(z46rxW#G4idB8rCn^c^L5 z@=U)Q$NS~`&M_XfE`Es7N6Xdw$rLyGONH+iUB?N@wT<%gdMfbJ3n#X62lWVY3_Q0( zHi1B0HhDJwGd4wbZ;_0UJdbmjWj z=9ba)EbUutS-4~Ao9iFIh|W^^mic?80BCUr-hT?|QA zC5^e83Z0x>tvro0J~iBAlO5Vgc&rHL9+?Dt7sz=Z7ryO;(m4v6uFK+1cyB@ms68md9-ORVRNkxf1_X zlBL^hG@ZK`<%U0iob^oXZJ^oxlDne#+;Qq=(?Kvi=FBD>FFfgS(1`mLeOAJ?=HQWI z5#H0j&6sjy*0Z$cxD%0?uy{q|p^~jrD>hz*$$NuQFUDTg_LYjN6NRhN$Jve2a0htU z^-*>h7XKyTb;iM~@QiiS85iSr+or&Pq9B_)m7dk|cQY=!LQan>q=)TM%T9L@tQzFK zY;}S2LEj&%CT*ARct6d^y=_?_OL<=)K4>t0%Oh0>zjmwJT)e@4eu%>Ei59iG-hO3; z#i%G9lD)|md}Nq)_cUjAoTRUemo}KpcU+M^=KeQ%E%R3)`I+5tahVP`n<<5a{kQbN zhg{aHj}fQY3-ifVTb80n^a&^&eFuyEeduc6CO-E{qe);)d=5iK;~1*kV@xtdi0e#e z@A-@<`7i^!yIR69@+tItA7szmbW&zuJ41(L18K8qGbTyMVdWzn)Sk}4auN6FM z)Gl4&iDVmolsy>XRInm+STI&zozy|}D`k;uR_E6CWxbK4xZwkD{Vax*;nE0svRZrI z)~mHZuHv^LhG-nkC69hO1QIo-H>zip65B0guDtpP119Y?`+u}m*r$J21UUb0u={Ef z_)pL=-`6r2HFwZu#Jl z6cfm;hhfOptJ#S^FC1UZXUxhv87y}fTIrJLmRmba-3fyu+1${lcFX?$vvu_OYL2WC zBMriaLwg_F;{E(lIe`iOD@gd7^|s37{>#G?Ay$G{9WlyJcsSH2 zJlPs*x$^QJs=PC-voYs=ln^yk@+<6TVRtz4yNO?NGKjRDVf2f==gRwFI$=S*B4e?Z zdw2P)!?P{p2Q`Y2p(e}!OS@j(_mV|xYn-&^W?b0dq|++NscVbvuq*V9Q;5op=F2xH 
z4kR_XWkPkvpO>qM^k>9(9!!0mm9E%t0TR+F0AkM%o6zm^=Gsz6#~0kwpV&jUn!Ir|KNJC4u1n1HP`Vs^rqev>$$pvxNh_M?Tkas=B(iNqbY?hAVsB1^yIxA5d{8?xe>6Xxz z;Af;>vh@hk`)yXMs6zcN`7CQh(N{c}`(@KC8=|1X!$Z(;&exZ~A4QNx2a{HTc`TPz ztr7EnfgxW&35}qH_uLaK8^J2I!Q)swn0g<%5!6rZ2sl0#>30c%B#--G2_Q*u;S`Hm zaEvu#yEdr0VUmTy2y6r|7y;8tN5KajIi(;+DOfrF6j%mYwIfj~;xB$sr>i>1CG%nw z$qN*;V3(ucg_2~Ar~+>FDh3@Oc^4Jvz;xolB+Li1)PY$znMRRX7HkPP6hPho6_e)) zc8|oC06TA*g`yYi0vFhYAqrW=?#1!bL}QBq?xEXI{a{=5RFp_QnOcO)?={AgEWslA zj)pv#6yprb??#cp7(TGO5unZvu-!^?V4aH~l>pc|H&D1VSZgH51Pg@(m^2t{&jXlr zn}33(%d|irL9AOA^OwGa2`DDJga^~d9@MQ5>b7wNizo$;>o>UL3T(y-*if@tFiAeh zIWlWUP&Ec7+2p~)K1r}fBn3Iks=@xrgI&=A$%Mcr?Aw6HuYmJcFO<7o*6?sGt~!boApqI?A|UYs7cp$1gP(Tq^=w z@d7NArBDX_3&#IHWQ6~J$hPJG|Bz>G=_ok9S|f(p(NVY}aU*=e2a9(ABWb}%4`YKU zLeQ3Smd!!6M#KRJyPz$B!~@f0h&5san21n{`-|m}ZWI*{rhp~5$u=DwjU`uK;vKjd z3|9YNuzHv^;wYGNAP+7G07F+ic0gZk@Qx|Td0JQ|3Yt4&EulslO01w;T=ja+JE=pf z{*_+wYfKnq1SRMm_TPWgr`*<4xAyfv|7Ax7<|_b|dC1MByD{(#T>)T&>ku=o@uqcn z0=#HXTuco97IJ%ccDA>-1o$LeL3LkE%%q?)n7dgyofcm6J=wkiUkLaepZcxlY_sF% z&#&D~cNT95!uROYnb~i(_-I^;fBj;?Ip@d^?(V0-#16c;)Y`XyaR0 zzz?l0AV2$UrG@K6lv1g&a{bVtH0Eotn_!5t5HnHZm`NWzR>8n6hxXH7cTkK%0l|o# z)@IlY(b+R`B}oC@jJaf+mDqAPWm0)?T-52|EM)@2XvFqPApQe5t&+LeP22j z-hWky+hlf<$tYmxYdL+l;rW5l#3c-&g7lk>opSJ;Z+W7j3!>g~-4uV%s|vwBasHxv z|JIk47YaEw)WcET??;1AS@fiIs^DggC!=v9n4IRuW8t05C8+8^XoXg`S zonwkk=;)hy^Ze3&ZO>gU50vjr50vA~4*#NUl+|KSNLe9PZ&&){?jU``l{Q%J1hV8S zRt@&jHCpQql*e%VH1yY{Y?Ljc8nC2qBygl?lua>9h*EWqVf#aeiLhcI+k%|Ig1kMa zFj4R%{UO*8Pv*phS_^bvWghR=b%2)~0#;Q05N?0?S?&*Uz)WdvE+y)uL>Gch5Rk?dxVSk{vFJIQDQ`;=s z^Sax9$qHYv*G(-$nRRx-^s7=tXXDqan~6=rzVpgU`k{HaS{+HyBJbeZ7Q^asPFTMA zc-SE8^r^1Oj?Qqa^roKfVr6b7Ka*hXxP}$xtHavu-N^b+LBBvIS26FcwYmY_87`}u zt^xE(udahyVVQMHdf4!Ho$5ZFfpI*6p!N51speUOzx#xS^mmK~TCRbHGg&^YOSPG- zj}n-ybb&gCBbP%4^S|#LR||P{Tj;HDSbxV)eJ)@#&_U7;T?^YcXfh7b<&`~wKlHr#sEo+sGIg@@lsvfygCZ>pp=*-gzAKKGhkK27}@u%FTnB zLLF^Tm1Sx`wXQl2N|+x9Eq10B|Dt$HV)=zeTpj9+LW4p35as?~AjhDpWq~0=RQJcA zJeE}?d3?1?JbnC2JWCc!PFnGc>MyWfoR+sg8OpZl_X(70y5gboAn$jOSFbz{8VO>K 
zA;)01V$fITKQx#e59lo@2`s(;7fg`=wf%9I5S9jt{rZu>_7X3#vPLZdniG(OlJS2& z=l}VF6QBZ$jR?45ng8eY_@B4x|01*h&-d8)gvGADRRn9Oyys=V7-}~Bw+zp9DS`M5 z01)cCnPEB zDs85cg4yqFgv<>qvJ}J|#j}bTZNT??=(A;-m-DdJ(}NoxkBy}GQy8Ikt3X}BvxYq0 ziF+&4EptIY-S0oFO7k`RIu-spe8!!o>dk3uJswx`Y&&zameVKNv5CU1S%s|$DopF( zOa8pSlg6pCh)t)sW5z87-z41l31z8A_UdGPp0Gf)lw%6sv>mYXac=Ckh)9-6l{}V1 zv~^A2H;8KJ`je_Lt*=xSqg9lx4Yxwt!omo~8FbL{SX)C8@4!xVc|loGmpfmy{h=W_ zC?H2P-(79EDd6{@vbSwoCi6*}Dw)~}9{pmiqfv?Fvt`)FG{KL9d@9^ljB-Cmc9F9> z>u>B9Ok4rAIy~L0T4hI!JEZF_%6&(Hxrqaw3)}iaA1;(SEZBFfH5IaiE!hKWLo}?1 znfw>?rRv?$$(M#JtL@*0-_~b{mlgQRsPg&V`O5t!d~b7Um#%Vk^PS_t*K{5>{W29v zWQqxK{xX9}w6#!c?RiCO=UP`mTh7G4UKz?XOD|SGpUP`e-J4xK|NbFL=l3Lmzgf{; z{nFdx+ngoQx}2AC%71p&G{44HaECQ{>q^ck@z1tStCfiR$F%>ss4&>yD7g(kBC(BG zx~`2-X0GI|Seja+F_}#F)2fc(Z>RA2(pa3yn-DGNV|$}Hyq&M`J8ZCU zZf}m!NmfYNsZmLH$#cos9?Bt69iCk)_am*;lOe_SjBr);Xj^QZ?Cpw1bPM+s*@?bP zwAjQVSwcq+L2X+_`_U|Z`EnhD<{izO0=a;ylD#2ITE4u*k6HU#eh*e$H$PS@t_MB; z;?LXW;-%X8gpMCLOCV#3T z`q~OFpO=g?{^%DM#rHxe@tW!%=jYUYMe{uNu&56F+xfPEJ@Ff=2fV@Pnfioxg9UN# z7@g?ZFXgE#gvE7WgemFLX1!PT3`r)kGQ@Oh(nQ9ak<=1BG|*9q->UGsFP(P#_HUt zlKVjL{V|^+6FF8VXj8=P)L`dPsGOHIR;QmcnZ|c@;!!xt5Ysj%FYH_XfZEhi|YN`=|z@9hkr zBP(SZlM=$>+jKNrR!bRDx9bO940V4rVUCqGVQ$Q7o5KU;SqS^Gwv=bG6mQq>H@MeS z#m@`LG#bA}Np72a=drAJF^PKsNy)^4Vbx0;|I?*` zPRmIt!@zdE{iKtwj5%H9%@2_Obv4V>c0CoS$&~~*gk|AeFV8k%e#nKY(i7ylS&=q5 zVh(H5F#)@y3U)>H1PnA33}HFmt{0meaB!+lnD;ZKuC!`Qm_PiGt%x6RY2YC&rhh-^ zFnMxs`GTTy_NQ|z6EPyFo)qecwp~v=0cN+LsMOB>&yVcO8ulNe_?ACyH%s?WRNnCV zvP}vMIIL2S(-Nckmgj@jP0EAq%U`be7A|SxAOx1fLvU&EV!z>c-k+cI=?Y*UV#3>W z#KHP(1-9!A;`eZ4BHDD`UK-^g#?3RyXDjkhT^ca{NAwdsvi}L7s!UR9G>!qE_{tb2 z!?Bmze9V^y-e4OD;`bDC1CmCg+vZsR`$&MQsHrP?A*nm6K=k?#rOn4g5cYd>H?VRG;bVcz%5Ubpm=)My&xivTZmFzVWTTdam zO<(@w!h3~Xs-C;WXjy!YcSnA!*FVlMKVSIyE7Q7X@%ty6SYIjy?K14}{n0vH(U&{O zC@mRDF8@&@Ctj7Jn7YT%Sylq_r?zLJtJZSeq?YmUMlq*D@*uANwEE|5n_GjJ zTVW*O(YpDHs%{)D|DU4!e^5TVHWL+YcXRVO_DBiI3 zpUE2hVLOiPfBi*3PU(^BHgyN*N5%Zg$!g&Zr?)DT(;?3K$jWrwB0kBciw^x7@6F|~ 
z%_(Vy-6?6*E|SB~PUr<^8-4vh?zOZWzoqQ9~pAH>wO=vzdw_d(Xc5dajMHiLxEbQP=KE_6o4+G19!q`fXxy* zpeuj|yo`H=T_1H0L0v>p&j-zT(EN7Z83Vd9z|@^!>MtNBftUzlY1X;;(uaG~0hN35 zuX=O5-2fL@ZnWhK=@@F5DkTYpJ?SQZ(p$5O13%-yf&V9yoeKv}pN0pI{mxHL028#D*6rcdMO~6uk6R>`{3CRRiSAdP{ zR=F2n`EbvERBZ;-b>1bhS%gE*hs^|pv`>Ch#ptTpU=RpFe(t?Kueo7Ltco zu&4ec4{YEmHJ+b`yNCsygUI?mV{rjSwftyX-%xk)Ffm@JYm4uOtssX7WE*U9{C`g8ubvO)C;R0?S;j8eS+g!e+s&!V%zoR?2LX` z=efGNK7jvW582#s)eno{_}OWHKWhNnVv%)+pK0%qKV&yAz+KKEquHdby|+KU2a(<( zJoh|wLf6M1*4M{9pU?OQfl98CUhwOA8*`;d1Fy6S@}pVEZp+Q5wTjv|=l;s#==hQQ z?97hyw)8|Ek0>>@!3|xmCA|-C+TY6c7x}svXTJM%oZEw4bVkQ|++Z_1=hYyI^W2>>Zc?Ryf_V#0%rBSpllX`L;-^G$) zZPh(Hi0~{jiqW&cf@is9RRsldW?nKoq+Scz0D8>qJ&ECm!Boa7>n6&o!Zrj8<;ej1 zxg;p^%=1Ysvc`>S00Rf(jjtUS+8CB4w*4G!$VSS&W{4VQzXTmQ5lUPz2L^IH7tSC9 zw){{iO}^Ul2U$vvxbH|`ekrLDBC{iXOe?m;N29Uoj#sDnnHZ%gyBVJLz57y{3*%|T z693joA&s7P-)}N7ipT%JZU1NW1wC)aK<$iy`@In=7*%Ds75P73e|8l4SeLQ*#w*^w; z0k{N7o^Rbi{iNF~?(U`Fch7>r)BRI(1#sPP0htWkvZu!ZBJ{J#>~`hXCYV{QJ~s88 zLynEnjkL6%I-1VbltxfIx$rngKM)Mk%Jm_ArLM-1Mlz(iF zZBkKxLebLkzfwKbn`E~`8 z^J-;%731{G`o*0K@+*n2_$%TsiGz`un}84Cg-Fr_$h>F*WI;rH(FAD$cjY6l03_5+ zz)O&gR@N0b01=tG2~v!>V#@LLsnZhW?#v4LE`n~dsk8r?_Y|0L18(pbl_IhV*3l(Q zQ5-JYUq7LboA9$ZFRGoI5-$Wb35QfX0_M4RLotAV3&9qURR@QD>j$940JRtAXj)|h%=dZv zeje{qldQaKRJe7`c0Bj^9lz52h#itLz2#S#kR5L0&V1Awu)Z*HGvOB4XzA^67qfl@ zO(N0vZ|gL0i5#oG&8!w}*6sS>a8{KxxJKGpe|wRgwX#?F85R1P{3!7Dl<$5v(Dk{CI9}@if?)*|sZz z=xw>P46@U}Edx29leX_e$IICZLTTwkN2*m!VMdpM{Xh}gdqu|A$_qyXw6wU^v=6t} zh3oJP1lMqc`$k&iu5)ZcA*jh1M^Dj@y^FBpK@1fw_yve; zAVLTgG(&?CiUU=2+n*216nBrCF?WBB?*L$tAgSeh{5A2d2f&&h>j$M@C(#I6OK?tBZZJ-E2mC`gdijQZ44XXGgtzC0dpq8{WV(O!DhY-D*~Dd zcVmC9p#SQjEK&}kb4m`vKCo3<)VYtCf)EB<7hn?-VRAR{J%_YMbX?s$i(f0C#Q-Vf zK&8>S_JH30kr3@FP<2$jp5-+v2v0lNDO3sLHgRk8DZqVcNkE$5HQo7P5?!kv)8xvV ziv|jRwWt73_9%c%A4M1v7_l0pDHH1XH2nJLGC%IqI<&X%(P(fN2)q0CBh$SRBe*dW zIQDCY-?^S_m-ayf`yrE8;+W09?>ZO8A%-_b5aYBJ#49UBr~WG`v8`qd^K5PAndzV$ zgGog*c@49aE&(2v_k5ZH*9o*Q&z3uot#NF;(UlHR%P04LtEK$;$q)`cEL~(b)kxN` 
z)+PS?vw=vyW|TzXQ;%^oG7i-R9_`F!TtX-%#+*akY>hnJ_0Y{suK7~GiHtssJ#?CC z=~>!p+5T6I+?(9MEp7(bB0-qc49&#fJ z0j5YeNuV-NpCSg^HJ#+D&91v{m--jmWAIVD@F`NfJ)8c+i#e^kepui0r+dv+eh>0S z(?-qb*)0dQEJcks0v*GoYP+Q`ujW2RlxfcN5Jmr5*BSmKYoB@jbUc_BFCZy|D=00J zK4C1hyT@o9UYDq1Z2K#lu1$cw@8hUxTBU3BrB;N#9RAsl>QCfnYm(4Tof0upSw=Dc zb+zZiTP@gJ`b>vSw%Q|W8td)43pQ+vKJvv}BQBEcV|0I*cIcN?p>H9dRM0}Q$Z#UC zl<~ri;EGt+_tI!0CZ@0KtEJGFyi8t=85UrfR_l_+U4(PF_Y2)TXeHp{Hpr7E2}t0; zI&r0Jl(A*K^%#CcoxkfoRKe>B6vEiF36I7Zi$%i`ZjFkYh2v6*yHNfRuU1X1s*1K> z@-#^!Jo+U|d!n5ev0L&l({qUCjZD`2cKNh_O)|VMdl`4z*8B7-!z3O1t=$Z^?7Hh_ zc0L)6xo(E_&iB5XZbiR9$N8to$Ql}oE$;UCNu_BpMnL=hko=RQyq6grVZ=P|g0~kU z-K^NUAt^C|N%CAq!(4*LVno~N=cgr`?^(AcnKoZ;?Z!n!T9$JH4?j+bMJ)KYTrDVg zkxxrlzFcz1NoO)XaQ?--8s|~a8viPvPB@{$!@DMT#xi0m=~Sa3KI=6xCNp{{R z`5P*9sZuK(snV_g94w#%4Rl0A5^Ag(cmK3k1y>9e2qjnL`>^dRB%%eY?;KOOWm1bI zqe<1f$}QafP%YkU%>8x+@p)s;W#g+1F3QFn-P@H%bd(jp1~g|U?5=Y9$c?#jWLF0W z&PvNS1n1Sb_M%B7=VvrFJsR~M9bCbUIVjr3+>P6xPnatls?_K!eo08q?Ybz9K~ji~ zCPiNjGw@xHWSE+65KvYgnQ>P9z9Klct06buzUwM4+5}SuZp`(7QlcbXkDQy#;-smY zJTbvAD3~kk*_TJ?PkEM>>rwl$XbcFR7)|wgUk%wcy2=~>)7!o=*SQ5^-^N_Ve|waH?D{)kG!DZ3(oWt_~_qi}eiJ%t<(^DuXeM z*-BrtFSav~g8)51!oSqBm4c2Z<%EMF&y%xE(2YLxT=xCB-z*5<8{Fng3h<(=~8~1-uze zbRH*qADPsPq_-xmZQ=Wdez?}cf4dK|?E{6*3#>`?0O6q-O8I_r>03i-fE^s#4>&`C zcEwAh;KRmSnfP_1*&s!NkI#2+u%EZKejy&+eCis&iFY%9;bzu&I+c^^=vJkvXg4tN z{O_mHOU4Om87z^tuiGW2GdU+b;^n##l>!QvQ=dGyzdO;8x95b11}a@n$ZMakQ-zIv z!;|yYql}3nqzs?UVhnroc-J%jJ%K9I@EeZslS14qY45=Jvf`HNdpvrUYw8CY(=5U# z{6T8DnfBM9?-sS^w=L&*cI%SeHg1iwc??cIixi&n3ne#$Lt;iYo6LL3 zeY)J3{a=9goUcIO$QK|+>krT%zX5Thx?jgYhq;8J!TB|jfTP!B@}F>F zm9!v*a{rTjz*le}bpb!3HXuRo?@c=};CJ{N&|cOp2#ps=P`Uxx6FBdT)jIE7eSg2s zaxQ^8y#ZnFdhyXQ1QQhqS%C!{^v$1*e#tE({vkB2u=mDIg_ukI< z&%SB=Mtji)@ss}cPWVRx+iav7`;N40it0m2^Fh!`QeDwZTVhcYOxsi2+08iq#dt|l zVfGf2YALl2dO52$Z}!{YL}T)^H++eB@@^ zdPv;=GSO=Oc^XvUNuf<@bC@}ALE?NBv78&uqqMIe6-IhbwfQx~6zBcxpobEH`<=g= z8}ph>3_F^jW47;Od|wU^Po`QVYC*1fAI2n2om!+Aa~}391*smaFx&T4Ejp1VW4YL( 
z(DtHSIbGr)j{-`0UE=WcuoC@L{=6z}MeAQy8@54{A3pQAbODbtGAR$(KMk`1@6WA2 z0N;4+Ey&IQP)ZOm`L`9Nl4R{WkkscJyVcWD@?G4^S)B{A(mA?D&$1EA!@0(qtp~Xg zeh*aW2U<3yIbgpl79%Da;KjPtAboCYk{##KCjV?V#Opk(@2EkY;$Tn`wQwzIQL-2} zoHu={sBtyg8O1vHKG}fnS!JegZjTJ&Te4uk?|T|`kuftz=HE>J6ja>cX7pFE(O=(l zY?-xrhB)6odskU>G=pmQK?(bH0gnFJ+iztxW@T@8QZZlI3im4^Jv9(= zqu*zwm&|kSN`?n?a++aqU4QrzUV-npAAtjxrj4p=B^#{c775Kd%8<8P; z>SPj9y0wM_-OZ@dVTSv2hUjqjBK$%}XhRkwr%RtHTtvE?Rx{^Hy*X8!B4wN6QQk(q~~4OyMPbsLBF)JF!yzp zcf!&QK5YK8lBc1zqoI4B}63 z4xqX=<2N5rZ6pT3CpqIZKT#_eo^w6JT)F{~m22m|UGYO+xUTyBdOA4i-fFVBlk@sX z!>Uh%KYgaSdv8E@6f* z+;Ew$x7Wp0P{Au%)@UBC0lE3j1f1&a#4T>xhX|PNOv;njo4X44O^(YrT`Q69aHcHT zZ|Sf}CJ@`q6eihYDznevW{?U}3!IGMX1J`;*6CeAJ`EQ*ARDt3iNP;La%U&<5An7V z66eU4XXu~Cw3h^e%7hv-@&xvr-WuD#(l+7}!(GlgEfYx+QgrO}j+~(6)Clk^FPh-Y zg%(5S^;}~j)r6HhXqq1GOHM;xW0H-jmSksXgpJJ}m70vcW-_54D;YO}>m1ZrG%nq! zsSzG(c0R0&7YR7zrd_k|6PuLm7WF| z7N&f_N8VTX5*)KlW2TlqnTb!4`c_dQuNcK}H~H|!xOHB?6yNfgMH`8)aW-TE2_lXm zz0YY|k@&ZX9>a2lea;Q7Yr+w{ z2@6*(nb;5UU&%t8K(1Y;ea?Hrb`)fhNsB6N>DWJNx7kRH#*C>`Nk~k_jKv@@8#6Y6 zz%smyYiR&3P0AE9+t`r7z6(p0b|!3iBSRo$|BOv=<4&?(*SRu|K%vexUQG4LPG8vz|fN zgW#2)ZBF2g-|~Rnq<)}t{i(v3Yfj-rK8wP4#C_dPzi;5)=AM`9nZ`)DwMa@IW7TGB4Wu&>o7JwatS!T~cN>-|AGMHi_=t|aqmeV(x#2ObQsQjt@})}4i@orBIkedz88+oY%m$8>7x0R2Ua+jt zH|e9gCANH~Lu=D&-3NUYL!-=y%nSa+pA2Xvek0ok!2axQy`d3~+3bf_@421- z0K!M2(OJNS$Lu}C_Y%k(#jwFN5oTNDZ5eyXtg=m4NYnO^JIlCM1Hb7}Y`=VBH=MBC zz9edX)=g!Pg)y+?1Ai%FNlgx%PC84UIwN!&P3C}v-H6u^11niTwbD@troX!YhXS{o zo9td4m!GE!Q{K=+$ZoIFWAWsI9_BuaS7tHB!0bOQL+4{iMcvyW<3#`{7&A865U&SN5sl)Lp-`R91R&KBHIqgXZh4(w2oxkaw z3Q*u;wH~;&^y|!9@bu}oOU(6k`uY=@t5{ahM(k`y1X&50hwb=nT@TT2&bp53^{D(S ztj9pF;L7;@w;nW8vv;WGFv5(ebP!f-}ll2bz0OBI3CoTIX!k<^$YsUHhX6@RY-9o6@B&~NRDcp3jcOyI?4XhDi-7FS=dZz zLR>Was(qnaRZV4i(hivOe%+$gBYdKva&|pjwDfDIP&_S&wp`Z0MB(NdmQ*y|Bm?;+ zJn|@5J~acKVvdH~>)@!MX!F9i4M)!{)I6W}I(R*xD6`vp_jCW&UY8cAT$Yzde(-$V zmZUQ;#8A8zzF+tFJAdlI`(>dh*K+C_#Te_!lw> zLF`UA*`EeGB&|vQraJCKYoAWga8?#=O%t(BGYF7>Ql)vT@Z*Q>I 
z`x{zl{VYu1V?-@dE36Es92qTgYk100?Gafr8oE8c*3sLukEm@=FCV)Wic5bU*#WG) z7R?7w{cJy3H~v!IJtcX;1k{^~g0 zh77-!&C!*+PsDORuZl9IG4+M$ziuRM?wSw-mI16K>(SR^8R>!neKp58M1Rt#ZAJ!y z#OfWaCF_-a_ z7Q557+RQS|Kj%1R&z=sQ*#jfv;%#3KTY(JKo7GK^TcPp|&sVouKVzqh-rpmJx?@rB zSRmw*yg)l4+%PWNyGK3O%io9=8++U4-0j!g(rPoABuQ(`XvBpmPmzEwfs~>mu1}$*- zdL1a_f)E5+;0z82Ef9Wy7C3y9K?{Uz&;n<0A!wzSBUgY{Mg@KYXl0ZmcYs!A1%5wh zfiMPISrzzmpasGjXk}O6?}Ao#mL!$yxDL{TM?sZ~X^3!Ke<6%pkn-5PNTeW9WRJhltTCT?^?qQTZJ9v{@-=H zE_l=YpJ$pP)5I^ay{IHK%Y^CxxhRFWXlv|?+`^fvCZ^#>Kl7qw`EP$DiUaKg+7r`% zd{Hwk+PsdZB`r$P2z2wa)5NALHVEMSaMMGvB6$9d{q(($AR3-_Pk`HASL%PROwhI8 zvo?K=Y|$-<)*tFkOjmWQBKN9Rnb;2>sw5ZtDTase-R!iR(6?Xgj}(kpqTj^9pIaC7 zx7mfM{wZ1chb8!L8v(Lep4na{|DjPmsf1tD2NfVofP2M&=ng^*272DETF z4`Qs?Vh=*H7t>}wgwy6eO$C3>_Y+_D^l-ikmy-f1<*a>v;-mNE=dxqPdVscKLwq87 z5B2K1@V}KHQO8BtW;G>R_+MObO@ojuzsJm{Y%qM25)S@0FyX^`Lx~qIr^JU9Ymx?L zh{FHknRAG=jc|yRhW|OY1!uVjR!W{5tc%%Sby^%NmVz5A_7POl zQgxU3y6fEa(-|YHS@_FbR1T53>bpd$u5&&7@brBGFihY!TLYDl43kS8uim=nJT)D? z$3h>ha0ATCU1RR!F&etb$wv|{cLh2s(?JJ%$R>_+_~s@)YEMr@dy&VB@CZzQRlAp1 zu}ju7(v4usuOL4TDAOA381vH=1MQd1Y8E`+$tC)`Jr%-6EFPliX$Oe2UvLr zN6$Ihn~+UPu+aO^Kj*1nOE+;*dn_WsLQlX#vyVj!csWFfYwr@*5{JTzH5O0x%MU%p z88U2cS}$9d#EEJ*35WkUxTZ6G-!au0R~l|$okK_K$zBfi*N3k z)ZkOdapU&;Ozdly=b|u0-p56@ooj72=%LxLvP-4W$KNi)iX}zcl_D-%1SQ{k%Gw(g zH18OaWM=hb1n9MRPdRnCj|obTI=SX1%HP^wV#d@rp-8fNrdKy`X%~Ms>nN|_)hlkK zoYR4Le(&xivGljR0?ajTx}t zB7`2TjIj?3o~`<%7^qq`TecCdpL(p86=^_YT{;`HN~Au;-WSc!ae6BGh?oFl`Uwuw zcPlChPIUaxXbdxC`Z|<|X!_)mxzQ>-C&Mj`QRzm{F&*%!pAhg?$9_Is>i_F`P zvw4vz9r}RvjUg9P2LP9)1MaggUd-*JBFT#3xEQ%k@KhPj-hbd+Pe6O!Y`h)+V?*+TZgQnB5 zY=2g6?HErd{7lKuWap`KJz?tJCkPCuZ&!DZrl zZx{12wXmSS@9)mOGCR$WNC;>97e=vMf0&}FO$M%{K|eU`*0;MBn{l;&CoXjro1E}v z^Ew~tt}k8*b;tFaaCfWEs4dz^O}`VJ4^(oq9oY}R;~knzXgd{5t+=IA{1Z*Gp;rEt z{qeqe<7SJ)prxK|P>+#^fQNb$&V$Qv+pXqf5S%+Pjw+SQo}Q0R(4%Hs*`-$O5|XQ? 
z`XyJQDvb!E6iLdw@>O^@8A|w9`_NohmEhc}zi3kC{OP~Zy?WI2n7h>UYWvUqDic0 zun?m%gAqS#KEJh&2+6_zi6F7Q=}wGl2v#$U3CUrfhLup674))D*Pl^5;PO= z66+ygk$9Bg?sIT7h3e05y|U0GXlT%ayAMD@9MlB1yH)lQ5hRRFz{Y2vhX=LBr{E>J z@UbM;QNSWE&-b#6zWL{5gMHdh!b{|cfqhbeRja{1y+B9jF1YC*K+PafvjiLw8V}A4 z0kvj9&rfhxV+T;&7uw9awBy`w8s@Lv?+h{g^hDM8$%e!|=VVdc zOh&uPChzKmqP6P=fWsB_8tfCUr&F?T;pAk3$Io}(lK|yO?}NzFU<~(@5ywLZ7usV- z{$-Z#%HQG2_6|VR+hoY9J7kt2^6v-t;Y$4u>5h#c(H@x*-2)Ly>nfl0_iw*zHYf&b zhAbzDI6RD>4UWEhPYgpz@T;mPFis#PFuV`+e7$ z=3&%>i>kkITQ1N2;mbC3MjlFqmGK9x!{7wqja}(Qg3IQK^n>EkWw0VulT zFBQ9G&&g(iKQbNLWepg5QDF}@cv3yCZF#F4O=;_%KD8f4xQ0@1?8Gl(q9SLhu#g8u z+I%#8u^js9`Mz{qL>MRAtd8TGci2W9YscLFaJJ1n_BCyF6b-AQ7CeJWyF-+bp23UVRjEsQ5D3y0C;oUFr0L_2cCq*Vk5x zzmK8mF+pfk7GyxUkHG*XPuHDWr~7&qC9E%k;t@^2hR)_Trr%!dJ9K+4vMd<(ONhg# zxU8<=KEXun&FY9+C5rM9S2G+{?eJIDbA1sby`NiEmZ1ha~ z15f;Y3|VunBr7K^9g`+T3QIOV>;aLKqay@kN`rl)GZzFEUqlvLmB>{yo1j5uf1}If z=On7j^x|oqSNZ2}j?!7hV{SwKp-(UeW^%nD@kAKL_Xxq@^>8C+xH`XKzqvmY?k(&Q zO&eQr@2x0L8?mzqoxF|AEnAHz3W7bpCwbb;B0?;+`-)CHFTwuquAjehakD%7r}O6L z!lCZioK@lw{SWWpJy@3Mklfxbk5Mu zr=;`GkmFeJOB%7Ba+v&rZ3N|Srj7K(dO1qZAA=J$HtOUZYQrz_tanMX}K<5g2}iED&d5v$X!`Cc_hNMx7!uU|(@%`|=6hv2ph}cM*Qb z2$`?AyHO6~rn%ET8-1IJF6Zcv&L2JdMhffP$+!(N1IE6I#0_$x;5m4AbYT&hIWTJL`smJx|vkcAkZ);ym>&!TKCe6NA%Qz> z#ll(`i`H6L;Nn_XDc)Mx-9K=RyB5|6+CL~~l9^oHcqBBPc$QXy43h?(#qqk*?zCN? 
z01KTE4*HAiCfrO}CxlGdYMe}DX|$;&6lM6_AWO_3hH6be23k<`b1nNw@tGvMitmn= zJ1y~BQ70BK5kI?$Fmr(aCHV0IHslHF`GF0cqYB~hkqO0NBNWP$+nh-n;;)4{b9Uo> zHv|g@gN23R-Dv^h^)R7#op^G58ya%vFEa=DKu14qbVUBo z_4+?op)Z5Kd;SvKe-3naS_4p&bgq$Lu5t9H(Js1BzYItd@Bg9xwA!X#xUnlDvzoNaS3}wn4+2r_EWasc_C`hs9 z+#CCPcqo1FC}ETX0Z?bLaR*$wrask&W!8Vs`ROouVx;A>tA9$=iAt^SGxppM3p#t@% zhbP>5_n8g(8FHM^e{cWXY{Ifi8`NXo|GX`?9}qlBa%mcl9qK->Z*skE5+ODJ_2G65 zb9(otqB>)~N=@K#*>$ri&&K~@U4C=<_nf9ag>!|cU!CCc(@_k;mnV!tzR?G7#8tm| zCP5J^B5gAtE!haJRdLlazNEaOx2RQ#)Ho$NINT=8V)R-~v_*fIwBb{9w!^dWO7w98 zO%j>?)k+bK&*nE7uD5?RAjT@zXTGoMet_D~Irvzgsna16Q@;hhmGxL0Bl*%Xae%e#HXTOd3^ zPnB=2LvUlZHHtm3q?4C29Ny}+aX_MYpW^XH`na96m5;^?jF@KZel(&XRa*K#`ON}t zA9GYq1<^4nY?wUp)(9VrPETChV7JarGY^oz>VP)m9n%%uo>hC^0#(}4UzI@9Mn?kE zBKo&4mC5BlsT6C~X5&sir3mJ;7K!TO$lJn{XU)n&SYZ2g6$w2OireeJch`%zTEOl2 zj)>NeI}uG{K!I1FNP)AbG>q}`4^~;Zzqyvm=%kyGK%KSEa{6}e?+jgz?H>;lHHAvv zLU%GQ<`GoN-jg&6PxPC|>RCHJOg>3WnGeu2y$zzU0HxR7@BjTwKMQpk!NuB4#!)7l z?GyO|9SY$qB|EQh@J>Sut^jYF3N#*jD|4=;7K8B)F~~~DD{YqQ`X6j|q!$bFDoTqN zKU8}2S>((#n>5=vU2z{Kk>u2uz3eGl?Dkl!cRTvByv|14QL);cI-8SRwP&E|Y05cc zgxO@0WLfVQ{Tl}p4pn`+irS=Z<(l4PI<5`^iNe!tomK_w$KxPxoTw45< zCQXTkPc2S|G#%;<(___*UJXlc?jUw9jZBsUhyH06s1>d(t(H5ydwohzVrP|JBk1uSW&k&djjC8 z=H=OOcVB!k?-1#gcg6Hwd;K0slpSyP1tQmiL{&n@E9$$!(U;ih=DRPDqpO`>z$?rP zyznnPB7f1P@f^+shF22ozIeyk5NX0P`jXr?O};V-!-n4d?~;ap)3D;}#%It5g1cwX z1p@wI5cGn;{TU2C$2^0PVX4=5UNsRDZw-5Ro)rD8nk9m)9t^^W3RTH1Y#F;88w;4{ zPh~hL%J;2t%J-S!^ruj0^{3chmKS#*&s_<;EVq=wDYw*nS-wwqB;4!MSU{oCSYTt1 z>AUr^v4ChTDca+Na!VqDa!V(K^5O*4Is7LU?$u5nDG{`h_x6gH@Qnr9l8ps> z7EQ2Uf~hfUzrGR8bV<#;`yCMR+du<}==x;`fAjR%V?D9_-yAY4`{76Xx9g-rJEeq$ zuY;5%?v1>)i+~VG<>+Wm87Bfs!1L=Rz?2E=D0s|L(tB7T?Ww+>nckMrCes@BGJGmJ z=3d{d5}r8Wp!n6Lf8;Xw>)metXD1vE#)O#x_>IPvXKgb8?pZ>^Hh@jD{- zm`kJF@H#Fm*-FXw-NWd#}U(~$*htk+Iv12BxWt3_43+%V8d^Kb7 zB5%>^$h}%sxx3Laxv*s7VTmY;o8E?l2&=Rw?i`u34c z72V7uYmIv95gsjNg~5Gb3%^U!6_&L|tB1mCg4v06bX!PlSqPwLE$l=5Q^tV_+A9bW z!UH55`Ds<#7v_eS11#3u;wz`X7=EHYK~w`_+Vh^_72|*>{2tJuxd+ssUI7(MR~?Fe 
zu+?mF08%UpbX14~W7^q(ky;L5O0Wm0GF?Gp;hw12!~r+eY(V1A;uMheFB?eK%K=tv z#ej}4L9iI{2K=szUk_O(%>@9@Q^h_+5%_O1^HN}L1cH49H2y5AJR5W~hwEe=16l|V zf&H!G8Gw1{K8p{v={$XwjY;bID(b4JK!VDaLSC=?w?-&Ki+v+}KC*qm90n1>&WDwT z?{kkpc&28G*G=M$bH1u0ou(5*sjle+X~e9~RnOqBPi$5*sOQ6DfHiWE8eGxqGyNNi z+s0({-*YDpY>?3s;qXh~Wt@qb zl?bv^or!*n*Bq6I|D5#l)(n;I8M$lkTjhb3pWlYeAym9rarJ@i20#AGouzck0W!*M;YK1grSb1deOb{W)Ru6w^Mfq0)u?$t#(TAOtWa`FG5Kvbdt>m z%MR#coao>C)npTGm=ebX#2UM9jP1| zXDDyU_9hTLp@6NNK?@(&K{KBdDkk;(Euz8(zK_K#^mW%TWhg{T?+nPty@O!kKp{V0 zz_J+T&mp)ZYd{PA8RSfo^z|CB7RIgH2_$Hq0mLX!fPw%#FrllZ{y>_*86Zjk6^VTX z+^Tf}ZTx3I0n;5s9TVD7gA5y!-ma8=1=z9p0Y-#f0Gr$z&=IxloZNTXQOy^IP=}4g>p|1cDR)4^4y%T_dwKBj?)W=`aFLVJAjV?g7ASVtBmIX?6 zl=%Y|pwur^m|bEgu!`FSB!hiT(B1*)C{PHs^9wPQGazjIB|K{<@GAQi@;6_g<@Xv8 zL4O9kq`iZr!^0|wx&Rqa0t1}k1l+fJWZ0@29I%Q-A*zB16+sa6&BnL`*v4NjR^2mR z_ySbhod8kE8n6iVM5J*B#DJR?$8-k}qCiEI-~l5VKOhFX3$T!11D11MA?t#J1!7Vc z^}36Qy*mSLaPI&P9O!DyEAYLr3&;}pe-qkwCnCmk29$v!0+>*MjsObbLxPE*ft4xl z0MtES^gql}AI>1)S@gfN_z) z8$Ah2fbY$oU(b!A&yBb*+8NzPI!^6BeTN)^ndJXW_L*!je&!JN`nOfYZ2FL}6oFTg zn>UB9zdJm5d7ILcy!R7 zY_|W-sO)fGBD=*_{#RglXp>1%xL-4KTH`mauHNr&!->EDdybR@w-vnvp3O7`o^A6z zreU8Nc$AR0M%rT<24f+y^#lbZPssQuud{HlZGKfmG3?92ZH>$T4|))ffq`uN?j)Q@ zY^Ki#G7_7_k2Q1j_%-uJg)>f4xid~jdUq0}*X|@l0-yy$$v{hGf5uthbjG=oyk_1? 
ze+?7Voq(c0umt6+=6{?2aGHvwcD4KZo(2fMlt+Jv#IV!v7W&u!%qbBtw72irZ!pXChE=!;3=d}h zT~T*7W)>q(ztg2(|C7wJ@kcv75YH$2F$?5VQ6nw;6LAH1U(8U4DF;5{O2=K zJz;Eapq6gsEq=;Bxrzhw5VFrs8tHd}LtqXs=&7%|#jmpeWrgtg#OS#GN6QF>q25Fw z{a^T+y$N$@PZ+BRCPRI*J%80Tz8*>SM8C;bqj38xG1ce~ZznnQlCvpt-K5<~@vr|O zm4*~LjFkBQpVLEh&F$|b#~Z_$lJquhg3)T5TJesQne8{-A77rUwc-3i#0pdY*M%})T9HWhvLBK zfcQ3TmpHHzradSk4W>Q#f~x{2aPeVW}4R-?j2vvVMK5tM*Rv8>^!|+*v3; z+vU4zB}c_)yp6px)IRK3$B9$TaoZhh41zfwFAY9;M}0PrIsnt;4OHV~iZH z^#R=D<7cn1&O>xX&_whfAq2cY(G%h2xbLvlUgb_FZA;jDK^fUhKKVqk9~W?a@B5 z|I0?Y@T(NV+LAC1pd3Il<=gY~61DDpbsu|#C>)Cwf#E?-MVx!MC1L8E;bm_f`|AGe zA2rg&%W5&qEeW##@&TklYrYHP$wZ%Q>(^9FErcOnm60b2SaX>td|zr4ea5pNC&>3; z9F)!gJ5qB;2GX)J!UzM|3+L5tbS8hhFx-i_f7{Zz zEL!16U(Ipu&I3RiRhi&Ik)hd}Y$JK!G=)aYL$HgJ3ln@mni&%W~BB_{TU!!ys|Un6~98%=z#Ae4cgS(5N!%$d&2&*2O& zM3uv0el-AgDfVym8q&SkPk8JAoB_)b_i)Si(c zxG?&Q;SiUQ%R5H*97Ly2(il5@Bs@P)&89ym4Hb2@GUXzguB!%o50b%n6DfnSBS6XX zvtFpAYr3>I7ja7Z3(rqs9xztvZ&4Tbu4=#_cnEaoBGx2=v2LKy2xxsqfs`jTZZ3Mj_Du`?W=6?LU=f5LdvloUz&5j=*0!J;nuztdrtCwI|H z{hmLjcPoxtl|$S|El%ZEDr}K=8(eu;ZzKH<8@A}009af+1j_Qg^T8tDH!iN)AxDYy zUs;yW+xmF!f5ibpW?u`q=^NU~_o^gNg;p}LzBxpqEf~gNjV_L280B2hacVizSsblr zDQAoX1$y_!$ftLrz<33mXIU(rL)f-CaN^W-mhT-Kl|>k#A4|#Cm&C&Vk8PUHFI<+BziC-kZ9hzt!G0>wNR=^=BE@MtV9-i*tmAiN{uG zgexG0YG;P|@oG5E;J?gKR9IH3CW-H-W0|iuDz&k^>+`(h5>6pz7Ik)UkWZ0-PA*<3jhRmGXK~mA~!vv9Ccv zm@*ho|5wTi4Sz;Ooj3>#;+w|bP}QS;8uA|1Jc59P=L+!XMVq}rDp}{(r zk0a$l0I&G0r}42X@SDIcTGilX6kmc4a-JZ25}qI=f(ffQDz}@C4h8ris0cgvw~x-l z%luE8;joHP+qp*(54RD3O&Ml%1)Y0ya&TXvn1oeEx0UsUug-U>Vg0smcJ7Ju?zPYA z_+|4`^Axm#($i&sX$<YMIC!*zIG+XHYfl~97C;7dI^45O!tp3*6 zk&3g$F6G+T>uud>IkNjpCr%ck0=pz2yk56jhBX<$)cJB>*l%^w8_mTcV3T|f%u-o3 zX_z8H7`Uth>2N)IC`ZQ8ojN_SAg)}w2A@@UVrdMZ;oX(tUx z)GHmVs@3<9o$dXQ?+M*jbS&v?Zf5AC+S2n(pAw$G)oYL1w9Rm8kz5ftjqJ@&`qN@| zR9*XCI2FR=5|#h70^cODA@q033ZcnVJW@)eBKe5)Ji2`9$82?4%%|NCjj2cLO8i}`p^BT9q+X-ID6ivRB7TPKC=em2Lo^LNeJguD0I$4bp> zt!>aVg73{ZB!_o#T=>hE#SwS0m8{LGVGgZpS;ic@t>kQv#XBDGY zx|JP-a=DO7*GBGnSgQ=R1b8=<|JL^X-Lfox&*~t+wMB%Rz`|A#lemeuqiZOCo`@%2 
zpi{v1Mq^~Xo$?3F@8e?ojx4!?Z7oHlSA{7(!uHWS`3P3xAMJy1 zVVvT@d0)R(CF9$;H|AZt*vlDh&~S_%QgbL>+RNP@ti{97-G2BY>rb+|PkXxRshq}r zpZ1*9RXM%?DBWh^hj5+QU_R|x01`N$^=B=*MUPiK-Tx-m)^8x)#vmKwF(x|w9T_=i zWwS50?Qd9Hp)Y(gZ%kWZCw#KjL|49WXHWh}U1$FCfN8c{Fv6=gzQ13AYF+km&!$Mf zmA@>+drzq#-~vHgq>Sf39khj>FK4l46$jLVeHUM`(B^W4hm%7jFDJx)`5mZNno`{y zW!R0;{NmlmTQuuCy436O?y`++ z&aw7R-(~qfA))XEg#@OR1_Y7~NpxdOh;_T{(5wr&#M%d#%Qp52%JO58;EpcCq1^sN zN0}P{Z3Q|?vZ-I-dwPj(;rxKW#Xl?RXF-AQ2PC>Pro_6v|4MWp&x&=w79YY<2@XsE zYZ(yAHckl9FOj0r>-(dl@QuNtFbu@nbAYrVC;ifULD|MxPWtqewn9-%6r!Sl!1u2u zy1#=r7XN70QxB-ul`dlKo4_C{P^%A2QvNR6r~ujqaNPsi8BTir-w3!T5KyES4uua# zEt(!dwvmr|Th`GxkV|YRj{YSIpKnN@o|Ru98Jk2mYo%!Sbu!I*0k|2NNi^&Jtg-g3 z;B+%^C~hyoWCGbnqvvFBNGwK~cy#(DLrU<&7YU^yIwVjX+_?y_GAq~-KS=k$p)`mF z2iAaE1w*m+e_3MfyJ8*b1;C9A`avg$id!y_M=qxoYkv>6*@%fd8Ws`={`9)H&tvUF zwNnRv23|_GP6i^PxJiQr;MCqxoy+5q)01x`z5zG?53{2Ce{pxz>(~|CFU7kRt32ogH+zL+wZf6EeB|S6n1}Ou$MR;3LAWbJ zpfjK6GUn;5yDP%>yjjn(gJGHcP3mcs6X~KGhQg3FY>v-Wg-qS*$i4AVBr-=8M>Q^C zghSOF0)Llu*`Y5SF#=qS?*K^_unNtyUJdastK{uRM7ih3T{@Rmx#}Y*u7lI-=}Kdy zKjdpn8O$a{S${#X5Ed*!)>t~5mOT&tlWOqji#?0zu^&&KbMf#n7OiYwOBWw zbX1C!CVgt}lJ$N`jJWa(8ZIjTfQLpU)q`grqI3?QuWDpv_q>O+U(L2zK+Q~KTj6?l zIx8-xeSIoPN{y6wh=4A=*FSmjPys#q$e$pAGCKQ(E}gaHg@u)Xr?Ldv=&Y4AHv{xn z=e+K>fSNi(znbgc7^AZ$2%}n!pcQ;MniK7agDj=G583`zs<^;*8W zHxeBJ4s&N6zt(_h@hyS-FZj3Fn?R4?<&DzBp2&w&*V%MkZmY?s5_834L<$?yeQ%zq ztM;D!M@#3gG{7N+pu^SHKxOEeYMc1xi3^b2P7GY`cXT9CEYu5=1Y)7?e zkFHgl_8f)BR^NiMI@AXcN}$HmU(SBmA}U*^(w2WH(JoPK`?g)}L1@Az%^7=IT9MKs z*qZ{rNZV|?W19qew)z?05Ji9eyh-}@!_+)RkZGph1g3uRZ}HbBkRcLF^d;+Wm{BP9 zf9FtbEv5@5ef3t>HBt|oUWXamf3y+A;4k3KKqS8$Eu0c!(Yd7_E}5Wo3jvTq(mqb8 zGe}BBvp(Xkl|hY^W1gS1sDo92uMDXe%665rX1Q-IeM{bhK;3D5EwySFZe}?e9zu|90_o@RJL! 
zKl;ce;j@2B7d#{&tQGUkgK2_DRwMff7-I*=l=-f zA|ByV;-Z$a`Ubv&_S6sFz;aqJt+}-7)7WLY;jPuv+|lcc6@?HNr{7Qp*l2}@^;`3r z&dAUFPVPydHYDyx3|E_!8$V?_i95A!&6SV{_EG$IxZ^y%S%y7+g1`StcuvQ2i4y3C z?mC`B6xIMr+j89}pnVIt`i&d~SU~~5O~4MKsRhmI)$WR7A_eB|I{E+}m=P@iDF#tp zf7?3HCXw6s=LZ&Wy0x!b3gjm60)^dI5Oiku<+I#tq#$Xo+eCh+2~#qLAN-K@Qzo^! zYdv{k$a>R*>Fcan+h59~CFw>4G81gV>4%<2zy7iHh8%WWs?I>F(D>GBI%a$=>jXB_ zI)1;B$jBEfz&KdT1` zpq<>iAaa=14@c>a%j45BIOU*>{my*~wl@`b{`ND1fe2>(+Qm9ROil9gBhQ9^3-`$o zCXb^soh8(+WE#cFk$x)rc;inKQhrg4cKAd>d*YvznqIjcEc=9hCG0}y`GJ(IOCr-$ zpUz&_NDWgq#odCL(?X^9#6!IqhuQ6CigqWx2G=up+qrqk?feyQS{Kw)VK;^|FL$F$ z_P)bFfye{ZIkPmLyJTuzGI%+FR2v{mjqC(H${ zdf)kgmN|KhwRYdR_ynVP1M8-{@Uvwz*eCn~rkg_Pp5rX)nfCB=7E>LBs_RJl=@!!* zT4$b#bcfdYXPe>BD(g$Ud=t5Ozph@mnB^dZ3m3T<$DYlMfzWp>%U^j7Cn)t*hdsIh zDT9_izecmpJnh8ha_p?ha5>f7XVSsK<|D}jzuN1tBgI?uFSyA&WdcX&O4 zA*4T10>rp?=nE{5l0PS~oOz>V0&6o&}-Az%T{v{N`mwA7$P!r)FFA*iq-;5~L zP=gS@13l~~{3c-Z_nv@%H(A?VGg1g*^(4aT`0=V}C2ptDGXq%Gj~D|E^OgY(V<<2& z2L;qkCWGFZKm8L0-x@swe+XuZ4<)7?+(eog_tUyMZrwZS4_u^t66^X@i~oJzV?dfVruRRy)iCOm+-l1hj=zzM>8`PI$3z;%~c}*q!xVf?Reb) zy*xyerK!QYS4F#u$7VdwL4nb~irVQ9bf!<~Ps4)T;KVz?3;en_qMy2o?>Cs2;rwXz z9cpLfgx6mBCe8II@y)34YZ>oWrSQzFT$WLNdPP#7_$(n_rPRr7q!s8AGGoDhp_g^P zCB1rUuX2e@Mg4=4J3@g#^BY$xBuIJ+h9^XXGj;I{VdsJ6q+{5UHV*uK}8T>;Z- zS-uJ%+{C^8G-{kL?YvwWb|Ha3_Jqwa@{LiAteLfkqIPm|Ga%<)yLu=-Z zZ2{5*?j*LS*%Jtc!He&|{_smyd?nHn2!=SUnNJA0f8xVzwWh}U&xy}zm=rkjF&C$` z^q2Hj-5{>pXg}&@huz&)*n~Q*V!@tgCUBie`LJ)Pyx~EahL+Iov1C3} zD^9Tllv~3$8l9ips?^>N0}{0T{ejZU8Hi_kBM^Hxwj^M=4tDA|0!Sfkv%Pg+zRSG^ zWJ@;TkdZ>Q=VCDMI&Ax#80+4}LV=_|JEE(M|{35IU*MZ3G zK2SOK#4NxM%DDTkS)FL6K6jVVFVo56dPUF}}Y8wJRxIlfEPCZi!@10cg=1>!)p>^nww_HruSrH`-X z-Y=((HQ?jpeV1@nE&6^|H36fGL@3fJ8X6tyynME791oq`ERKdru+4__vH2o9d=|c^41}TZ(=fDEAL;Fj6(JE z$fukG-qmk@{BSe**lc;-e?EoTo)__cE*Y|(rrJ#8R=2yE-0}50V=-mc*}-MfzFmos zS0a<}nyxBbYtrZ=^-o&AJC##Tw4Y_oCB*ODQD|vGDMngTbc4TB@ctA;GfisU#>;L^>xk{(;3LvlMje1=ceA54~Q@4W(Q4zLF`Ak zALZP#^YUKrEtttt*TO=mA_E^B?&4k}3?^>5bhRa&x 
zyb!;w9UA|M4<-4n92&nZs;6=n7Wu6(xWoc!4|@Z+;I+mGbNi(+*0n~qfwe}FgH8vT zp|!@W{Z5Ch#4d-`S-;I_P)&-0-`0(i+$|Q9yx}!gG+D0?1T}B1Q5sa?pd@cdKp-co zDPe5%-=vJ|asaA)H*LY>DNyvMfub1Ef6hDqO%qoh1Y`T9l(lB~$S#L|ze8LV#Qx*I z0h`g2e*ZZw?e&Mt3qE5+Kl;A9sf#1VQV5Zt0ZvH>aPTZSgOXkWb8e2zTEOl9{)-Lr z2u49wVn%a6=jJ73%=UN@_;Q^zzsg>-G(JFGedvY=Y#-8QXxWzP+JAYH0y_htKOH9k z4?gdz%hi)qz^MG(=;};4-+#_!$Hi40>sHX(nfl%pNqN&%f^&z={%Gly@VX9hSHtc5 zSJ+EGbr_$rvU4Qodja2ZMs;!1#*0u0?saDGZpXJ<5se>rM)s^Q&yl`pu$IUdzGR@i1WR^4fbw{5b1s{FS>0Z|3gzzRAzCm(2km(&C-z6 zSGCQ^g<3V6oc$nOlNve;_`>B+CYK}Nd#n{KtL@M?V&ks6!ILTcmyT&~jfva7PK_Eo zS~DuG)(O1-TSsO%|Lcz|a@;ryEE9Y4Q4@`>kv7K$$`g{N@1r>SbVk2U>#T{KVo-;M zMG*pMB6laojx7x}t9+}id|aL%OWQw3Y2g)CPZM(~>eVa~D3rY{8L2b4(K)EiGAY;d zDei6W!mjZivaM=&;d6S5Q22w$w=|5=po~rS8tE(9SOj)jsnZNE#f;cifu`5~igtPQ zVx6rPFX1hdNZNA6auv5{^~{pjDvCmc)8#O$_9~&C4aZMCW!^BV(S3`ZYpY&fQyD8^ zL1iLg2^Zap0*7A>$%V6zD~=J$o1bfxFLsk$d2NP|XQgx(R)3qHu-|f3ZN{|D+Wir& zn(9kk7;Sr!G?u2ZNlsGiEw7rZg=s28yj;fo`YLk?@0Eep*H;i$bCl{u3(N(lxk)_*)U4uHx)+dm@{BDy6V2Smv&OvI&aw#bI@c}ElC)8uw^84xIn!8OjPq) zS7b^k5Bg5w<1W0@$D!98qu#;1CmTu}?Ix|fK995<>#EAfmESl2tXt@QlZiwnl6f<4 zDRXVZ5S^JnS^e=PR7r7}+kZjrjSZuS>j%SqEG>xo#`wvIDGN0=GPdlG>%nS?s&}Y? 
zaPVq0xF#>*nqK&Q_7&w6r6=`zIlmTxjXA>sAm`+{DVe&c_qCv@%|34DrZ3gf_7o3oDu@;!d6z z!`uzps~~v&43`^?c`EsTFTDNn^5isqU`*pHe|3P{W|W3 zIWaIKTsL*mv&~>aRO74dSn#{|v>X1i@WDGb_0KH_f>m~m(fuWyJQ2A@j#I$uhZf)j zb+jYLQL~dvE6$*lGpG=O2abbhiv4DIAICwQ9xg~gm{u)RvWUGw<`1fqr_(+sDK_iy zaa8u-ucEE&4RdMla$n-~EK%Zc4j)4OEIH>8cNK78Dy((YemVp}0(poO>*{!{!Ol{UgX?tQ`^%HmcH4o{D_9?#N)+B2$q4fh(VE$tWP9h zKwcdM7%LF>0=rqjSnGPS%YxOS9o{!1MN%Tiy1WBrr@{S%kNIDI57%?37EC&551Yfm ztIw}#*a(!@b(+!dsHgN4N{`WzMZzR`hg z9{4m5b z)s726`-d9CyMb0%S11>mfYD{2LQO!tHQCPuS9GieYer+3u}@YutHq&JLo|&2>*CJF z!f_Xmzw2;wexis5(s86n`LbrkL&~z4Rjp?@r^QXP^3=)GaWdDVRVnZ3YRB!c*q8A8 zwQ6$ZF*0a0vp)h)SflUzbMveNzX~R z2w%UTAX`H0n>*}n`@C}Gu~`|X@8q>M;3WtRu!_MVCs7Q6j3fGvWBQI0@+St?BGFpn zkyFIGiqYEy49P7Z#ns|a*Wyvv5>VF?QP+}C*ODp8(bym%A0r{#AR`|mBio=LABV{< z;1^cLx=PX8M1(6Y5XIGUP}g!%*YZ%;@=@0cP}jbvt`(-P6`@{wx}_$D^&!M2ykw@9>2GWNH~j#>kSxbO95#5PYu(E1QKscQ*3TxO1yaw#6vXymSu9^ym&)E8j9-xm=xRUA*iNFo$Y? zJ`m}hG+bn9Hl)d}wR&%}uc#_MkBU)eZh`9m^s%Pia9UdZDW|3O+AV{}Tz;|CQqskr z@uMdfaN}dyGStcx2CF_dv}7X1fEz!Ko7a)^#l;+Q*vj~zMMNeqpvWzDi#ZG9D zc(&Y^?lGB6_lj=k%)tzi6sHPGf);`+aD5OX1ASXoz}@_5uD6%b6F4nB6QR1#`cAT% zUOV?%>t@khCqc+ShiocDWc-kB%5Uo(G>5RgKtc~`Q07|*ip-8{lJXJbyL9XGUe($`=wg##24hanRMW){hWigH7baT=$jPygO`v#ZGbT&DE}Yyhg#adB?!-6zz}0 zfo+`conz!!4cE~u+0cRvh6bhXmi;vmjM|I7gsEJu+t2Irh*j33uSKNM1>#!t|K)W= z%Guh1ITqKUAuPr@-Ax+U{v7hEm3wz+|3{X?D4Nt=<1u_eZ&(|&I<~1Q&E@3Gv9=q zVZP}t`0orSM! 
z6UJMY*=l%aujCsxH-_EXrVoGWneg#Lod~wqD1^CI9bm0p6jc2R{1wRQi8n8ZmZ40A z_IZCfsJ$pHcKRopz-}2)9MOcH9SQP zfd@37z@z*@ea~eNtL!3PH(uV0ma@wNB8R+FmB^oOfC|DBe}5if=!bj5FbtiQ_Su5FPzMRt_D!ZV)Bg6ce}i3B2sf zH$GwHpEr_a;Yi_W(SF{&p7cfZ8+Zk>lAj4u2jI1G4ktJL^w;b&Wf8SEk4#YvwnBM^bS70v|!~yUV zOjn|jV&Pv27O+fX>2-r_!#QNZvD`%A!+vS*^@UPvf|EN+m5k#`(dyNrDprsU5S58T z>_8{R8{Hr!ZcsqZPb()^5ggwzTXXLPm_3I5H8I7IC|02K9bT~ny<^CXIqmmQ$(zcd zk9k-n!;&x%&|`xDu?l3K0VW9OaX>)$%sd0ixMWG%mtt?Knm^{@#tppIN)9ul#hw4d z#1*Gh%87UX3{*fs(hCAL5C}X2jZ#jeN!M4Fy-YRCHhi%s($d-$YrK=jGEal zxN7#ZHg##jZ?mOd)iH0E0Jnl)D<$lQX=2KN00{D)fglKKxlY?xa#eOty9MqP@o!%2 ztZg0rFB-l(p6d7g-*%8$Mp?&*N_IlVA<8I(LiS8ZW;TbAO_FR%W*L{uWecji6-Ph}NJPx$mZ|8QIS(q9=@2`o)@eng>8HRhF&l)7wXx`NT{ zugXTAN=K9_%H3GpNwiB%s@|b;6EbyBeRgl_jm^EBZ||Ckv)*d{*a}o)!rqZ`uZ?0N zm1ktHdlE&J6-#4s`(?~O-G%E61%E=dqLi3p4UJDnlJEY^Ws%4(9Y5a+!?13 zB}Ao;M>+dUydGT3z1BO9?v?QAr?;n>dR;Q=n)CkChFPIT_ffv1#>n0qU7WBSUEXQu zZ|9}QG~qn9u20yc6Nnc(&NOK79M?D93Zat-ny8`Bm3~Zr(kar`2B#l%2z@c1C|Y?B zL1;-`EHd)|p~rlUsW6VQg^~y|Vedc)V1-{|5&qAKx+tZaA*8Q(D0J2YCI3*Y>+ti! zQ@tWc!JP@K<4ZrELWgIF>nljN#fE3qiH1dA#UCeL`{N+Mj=J@CuEw#%^K*5lz~`c| zdLMGGVQkipJ-R9Rv%!ieVLAa#S$)tlzt=}Sjepj3ofL%4YJ|-Zgw36Vje8W0(BeCH zJQZd&xvlPR+kJQCr>X{-`*Xh|eAVz;-gqU&PH0r!*HK*3epmNKi2jT1-Nc<)rfO&f;)Yqp)J^djTZ^wL+Z7_+k58We{hWJ#yJuv` zpNXN;JuB4Iu&)2UfZ&YnmoQ*4(0u99SGjxbRGD998}x@W-&QBdWqEI7AMx2edFyoK zBl_zU`x9<$@&JRmEMcGjOEWFrfSq5b>8B!vdfE+waag=Dze0^(pz|++`BcK@H^dG#fDkDiD3Clxs<6#6zA`}4H{p5GknSL-9(itZjY z;*vKjnI(o$7rc3TqcQs=X+mC(;}d(e^Uot6?sgfk`al1f*!OF?OOYo&&xeZ;gOym_ zkLo>YSoqdM*-wF^g;B*(N5IA(L`v@vn5{ulKL??tRZN>LO01k_Pv56cg==0>deO zURvAe{kQSQKlUH-7q%;b{;|o(ze~xciD-wV-N}y~4LW@?2m1r#E)V;&AV&7f^o*iX zcRBY_ntc;($k;AyDNze_IDS``iGBCh2J@dJw-PwlqZ3DsN3%?Aj$)BN9n(IwI3Dob z(LiS0&Ae4jKHRcopvvPan?uiWSyh2nJz&^hF>3ycOZ4uC7J@bOx)LCF^C6o2GJgwF&i~wehIkc;m?Jg^4{O=KP!zIS{}#GmGlWsU{MJlpk6J! 
z?RM6q+^JQ^82?h{z34$EqiMq%Gt2R{hc|yc8Jo9tn>aEF!PMPMC6keXQOgM@v(wJf z%f7X)K!?%4)mQlVyWWS^aq?}$VW@XuVUYmQUgvw+;cNHuLb5p7Q+-jhRno#~d4{o* z;SaymI9`&r>6!a5{4hyNlYQo%w7>E`{lE*UffE4p%j#Veg6 zZXm~yD0hgNnLiXFErznZ%sewhJeB?&merzVwjdqm_T|&O5FS=%u8wz+DJRIir^m?7 zBD`5RbyVzN3z-N;7LC%a!q1I%D$XMnS)n8;Y?F2y+Rxr8#o*nf2O|w?xT>?YtXd(U zj{Ng05hpRMiUfu@I0 zVm)1#%H^Q$(Fb4~ozRh%ugQz9hbPN6U+-IMY(Bf3u~^&x)S>B4ZB@6cZpLw$*mnHb zXPNDIy}7!SXy2Tf0`5f)yUuOd?f9w^u{hrx4gW467-aUI<|!`fssEcKR(nrqX(n=7 zqE>%GXjpD2T5N`A%PM6%p6M2tO?Lf=c#>Aeg^hAka_O&Z%PiM&{aBcx!CpIa@AnA) zQ{uG6HvG$C+)r;5eMYDzYt(LruMDhNfBZ~Hu$$7vigpkG49`GRSG+`=iRC4!^Xnux z{bfs8Zp1~tCGEH#FQ&F#>y=PA{^v&R$=``Q`6<#`r&VX&U0Idq8mp$lIL#Fe(n+twZk3t4Q2ej?X#`fA(GE-{gycf0tx*m-dGDKT9Rvv_Il| zKDQOAmM&|=v02`JrI=HQ_9klN%5ZHQTSB=}xEp6%yNNM(tNvzhyHggNSJu9lHjSC< z65{K;O7uEB`myDiwm&P?S6kEN6j|TN{rS&5!6G8=sAM~8rX|Cs_S@&bCQ8s(J+Cr_ zb!5C-GjOdYY-T4v_0!vwuk&ZBJtvr_AqlCc6R|qK?JCKg=a@z6dEF9o{CsJ<_3-t@ zC`8)xS3YXGIPmI}ZG1h`e}*SSFJ5Im*tS%tNg$kmO%@_^jc(h%HF~QenguZ@zhM=f z<&?!3xp%NtF=?6J=+88bOOC}SJ+BF0n{uGngYyleEeK@j#s|Uhh#=@@U=x#q`6c)d zGP-19*d`$f86i;=L6F+t?GTOd5beOGTdhUzX*bN77dYxPuKc(}mrMqGMNHC0NVGu^ z=~uV4;`|DdBwhQVHukUS(J5}ildMkww#`HHnU#r~NeZ7d< zuvaqr_vNdp8zQ4GSo(N}*eKq~Ldb%xB{u9a54x3Hb_HW86PAN0;@3!4NpF<5gr|9rJO2F~(b~Y)-o7a6>2|XFBh8W>?u`9Qi96M| zx@NK<1$>b+?@80(C#17u<6*(~eC$i4*vSd*3wF_3f35R1^Vymna(kky#IMVkS?J9N zai^aJzn)fB%^Emh$O@~r+^yKF5xMt1NqzN?(MkVxv%|pbO;VAtgydZjem81c&+!_$ zzx=g3FX3mG1o_U=Ig9sx%^KKOKg+j6e*QRO(Wz?HZUaio<#(Ir=urn0LuTB!Z}b^MOpW4YSQBAS;zj*5~vs!uUrx>z%qvKKmXi&AF4zhipF-u1#Yq`RZ`|Lv-~TsmWv(I^Um_ zi}p>bM4meayR61wbg=l8wh*O1xfb0F(JkYniw_^K+OSFYrm`~5c1hjC@SRNNrrw@% zDBr=5jLzgS>$~7A^Bii2UL<;kW=ZVcL#q@Im#+g+h%if}#_OgQMa{gho#h>3{lD5!R|OF}44 zVfxKv%FQqZb5d>#GWoj1AHMTUC+lsu82}!C5h^*)+90-D>!$agwU+Oo*{qXb_ z^zxI5A{3lk>VHGxJWSL|nae2~VHN+liR%f1EDYMXZ?}jsSzP;62AiUzSp4Dlz#vRD zIc!;=SzftW;kFe$ljYSY5&CY8kQjp(+|jMBv=-O7E%@L+E_cg^d@(rYy8Wk|*5cMD zf0$9(rV>k(4QI3A?Q^;n%Zacb%)sRLg-IJQ5u3FQ=?!dpdHaM;kB8v}F;V@6eJrqd 
z1@?JXz@8o0?*aSo?Z93hK%N7WD+Jy0%A`Lwl|EiD)PT7{*e$2bNEabyUK>_G%&iSm zctQana+}I&wW$E}=YpXI%yLs~bOHG%|M0iw6JZ4~z1y9)yr=lz)R$(GRjs%g!mqa z-cm&0d2w5*Q%LoF-uktFB*;Jce)k5@{&{kz!x+>$BzrXY1rs3soWds;a`P93Hu6qU z*dt_$@95vkahq9AD5*;TzaxkRi|o_eO;&Yog7{nh5^ka(Q%nr!|JsXdv#TIAK+ z;V(oI^?N>BlFjvAV(O$9SCv?s>w)8cx!--1tdW6N>JzDlsKTiu*mI(~lpyr_=>i^l z-Irbt*(&ZKl^`=LCu&)!om2j|fc)-}zxg~AcsaU%odjj}UFtW~c$uV}nzD|Ccd5)4WVuVgO&JfLp zo4qhMMkxOjWqgd3Ic{jjm-=r8{!KR_JY_mE>(lVYy9HoSJ#x0MCO0A>Z;0Gi*G!s! zv>sP02JLLgat`*%rb1yz;RF-8dfT*6T%4rKU}e_fMBr|ny&<)fS{ahG37_lKub~=o z97m^{LS68;!L!W!!FSgecmI_Tm!kJ}cNk5#4^8u)kv8Ht3Zn+8haQBj?hix1ktL@H z8@$#?U5#DJ9ItzUex*oQ?j4{(s+T1|ST!HkT z>?4)t(y(UNZD;YTuM1}voogGH1SCYvRw_;rh2KZ`e*9d3Oa*9RDNV9_Na=Kn=>Aj+ zV~lMe7IUvqxgp!uk?b>HCS33V&9ux zA~TbRO08bCzLz~f#u`_lQSrz(I#@HpJ!J1J6?F2d(+xuFJ_uOzL)mG?IBf?zM;8ix zq}9yx*5j}6`L$rZKiK)6bu(A9Ell3>^k@F}d5zOHIU@^!%`w$_w$9N;wTX`q4%0OY zdBzNopRaQBv@{=iS3*uzm@FRjQce$~vySv`fJ${B-vj?ZDEJ8V^Gl=_X2*%@`v~+g zy5GU+GIslF);CD<2=!VC)4tBVUx`oh$0sEel|x|*(EcRkxSex~I+=rt&z|lfcY+`u z>u1<3py`0*FsMCbCPoo6P(trg_r%1w?{AZ2KU7;IU1#HiH+wCl`6Lti2D=c&);)HsXd)=Rr-@iJ1<)*BGv6+H7L{8at{vMut*K#q>!W3_&^ z%aU`&MQ8I1DpiPjRtW^;qU+YEQ@)2?p{ZidG_Vuu6P+-v6H&9GDX{WIu9z&Eg_2H@ zyMA7=P@8I1G1$}Im=dvh7vcb$trxB^G$KOTs zlJZ{caWXiNmbDq`Vg~U~*;f=#1@nIO9eOhLB1zLB71zgGCFMN+SEp3nulMko z{rYk(oX=AbZj4HP_DA92q{n`k8KIMlhC;!=y3gkE4#BD=v~Xjd$L#SNB(QPHcw=FO znit_qb%f17i9 zX=-P8x&EaRH@_hdaiE2bbJ9%`&`lER-pdiBQ}Z48*!L`Tl56X}?j1E2rE`M&tXv;F zh(fA}de)B1`GnaSeXau3ZW}kaHYq_kd@^J^`GMWa*Hu( zJNMiW#?gb=O}yO60cTw8L88zjwB*Tvv6oI}y%aioWhk^%+AT0)1>f^@t+d%+f_CZ2 zwuPF^;#Z-V8kO@Cdk&+lj~v2CYd_Zt*opgss@2b6Ggps2#ML5J6W*9%7poXidgI;& z^61hhVF?Ke>;a6Mmfhzc7kC#vxomNvCm3@|NKdTSMk6-Oy+veEMhJUqh1aoYT z@+g)bFJmnIZRO4vYW?@$&a@tWTM=S-cCZLWdbVc=={4WRIJ&F{PSh@bp{q>pmQUrC z2#?k{bv`pE4l^i$W`D?3ig0`0)r5@N~MB7 z!!+}JDl#F<_mrC$cINswK`Nubn(UyC%HZ@+J%W|XR|jO zXL`U;>HzbjU6`{X5Ig|_gk?HalHuBQyZ~lSkCXi?D5<<|a$J8V}Ny~k1 zdCKlAZ2zNuC{`#E5)Q<0Wq8;p2cq#vJP$LNiAw zjY=U|J!bkuPW;>-Q6v0Z?}I&v^Z~>!whYx`$B=#Ro1~u}h~M)VSJiw#_A-5jSPHEo 
zBTmRq-INDo>`H9WEU_mLy`{kuv&Q?{Y( zz=3ap1Lc7Ob>TUogkKWr)gD1cam0dM~8n-MGXLQw0m6sZF&YId>e- zz8x+sIL)mJOPl{45)#48aqHqy_5mA+lKI(VDw7sD|LnZq)U1W`3My7S-o)dlU*%Hy z+ER>%s~#N$q`$vz>o39Y^NC4#gyq{Uxkq_6ElV7qXPhR>EnI~OnsEfM6B76|bD*;m zm=MBFI*_kP_TQcS-8l5`M?FP)|cB)yW?ZrvO(Qdz#t>0Xb<;)!LIzM|fyJGY(YiLnAOT*00Z|4|2*~|9L z4w)@C{#&H?uZspxpBAgb=AB&L@syp{SKdjyuX~w#wf^`v*ICgw7UI;IPeej;;$(Va zYC+@z4t3BkJxW9T)8WNU_d;|&v&1it;(4EwBnf{e%|#4GXoLjG)@{dL#rg(U=Y~^f zjp5?xt$AP1$=YJCH{Yxoa;+t*UTxlC^4dA1trnTA(=YXJ;IiF65hsnMNUEXFna8OG z{hq3rl|OeD-XBc=Q%Anc##OrwA~{D}rLNW{R}@LO*dSIGJlOBj{9^9ynJb(bM+yRRikhGGnVp-a};!+&9=A zX5GIBDwr$4MLpa@Isz45D8TPD)5j>n|wpy8mE<%dntpc1Oo`B>F0ZADF$!`LZK?0I_0+Kxf5<)@} zdO{K|LK0Cz5=BB1Z9)7*VjQG8tV6S$sG(97Y2Vr-8$0;o-D!7#%#E?l?<1 zoIo*}K=D0+;zt6-WUvC^TLmtLcp{Q7_jxTzVQ)!c*`%;$QrH41jED@zO$NJ92D2nP z&7!~K?!vJ)6)H8fb)U`6zW%*Cgf3*|T@Uupexv=x;%o;p&S3wDF(W)gwxl2U;601s zGPPrg+Q#gjiN@#=yS~PU60-qQCKOro(*AeH|2<@^omVim=iOOhMY?}*urzL#bga)@ zymZ+s%>EZ|`?iL4KKEXFg)d_EY|y3e*>JL_90z}`oYeJtevQRcle%untZ8F=mw|_` z3rlbPoD|P;nz{J^<7`in>16q7^6`ykU%%bxgH6i?IZo4s2Aq3&^;yJ{uFt)l3OjnV zuVk!~q#N5fUYTz0_YZ~jnmp;9e4O(7o!nbxb5ZEYsZo|#d;Jbp?e z!kk1TVDgcec5JZ@66{u-Gjx;d6Z+^I;FGZN(}8@h9uh6-ZzlCKGmD_YpI$cIa8@2V zqFOvp#G6v??anK0^Ql`%Y#dndXXoxN%(|YY5)?w5$F8kc^zEAFE%%!&@WxA#h$Tfi zlbVA5xGu~Jr}M&lRxGGfw?Vwh=_;zJC% zdf<~0C?l^ijxGv=s1r79JCLI2EP%fDixQM;L)B^d__*yTvevKv4J0}E(j;Y^+2xcA z+iP?mFsQ|m6M9P>Naf@K@mnzXU!qI=pYGHDhNS-u$^RSvpI_?#hBORf-74_k2sG$F z7`XbsVfTN->3@R)0f^*0JBaSde+wvn{x6E=z?XSIsPI4G7X|1?31 zc&$URE+dfOzd|TMY8{fFFN9>cwISvQ#1D3g=3rQMg<4cPk_v$6E^Y2(O9OT6?v=Aq3S|CW4#k{z6ze`L3=vFUGD z@6zjLwHte;?GOcKmBruL+`t;?Z+ebO+JqL@mkO33%1EfY66$XsKbVAmSu(X|MoL>y zoBoB+iyJ%j5?&`ok&(GQMSjQ? zXYud_NIGYmt=EbCmxp8W7L{j^Ma`*JOKE!jzB)=<z! 
zGxh$*Xvy|{=1eIVGg@{<*yQhxWa4`VC$`T-anHP1Aceay+DCVjIN$6sa%`b`5LJzi zK3GTTv(Dbs=HZBnWqWdcn8)79(t4W^U(7J4YmDe(Na~-`u9&6E_ZQ=O8cIDXcPSlp z^7CFx4uq+_m2&)K?%Q?9A6mIb60zTNTi5A$9=xv=26@0Z3txr}80E3Qg%8kh#e0w} zWe?DNd8;53V#_zl)w*A)n79=Y#T6zQ}4iWcE&K!uw9YQ{U4s@ZYJY3UwL*@A01P7JP zwBd7o3;e%kN9F>uZk@R==VO-~|CBm@6)UDpQVEDk@CW#Wp8`-w|L)3s?@&E`|`Br zjcsFi*TeE#jfvHD?kcxZbv_AKErwY8&kHx6?oSy7uAkLJILx9_~JGy65V=ptL=YSH2#?rO%IiHn)ZuZJ*Fu&nPWe zTcp0X9Tw%<8{l+9(#$2e#XBsEbKX!8#Z%3(D|a%V&u={oy&uVg?oG7V|YIkhBhq$KM^ME-<9&4CJ5%J-LV&xSp@M)xvCwSXFSNJbJ#Ow>ylzkbR& zbY`L8jqM3J4pi{Rx`*IjwuO@ryuC#5_6osUHaLTtLI!tvAal4X@iiK5+CVscApCM5 zoG}p290d{R;sdyn_y|N^g$AYP{!F=6)0i*9e4b1;Vuh;ktqFXHw<- zuAw}>+|L8yMuBjXzfrmUbG!^>FflTi4jIgq4EBKxR{kzjiQCaYig*8=DsLr z@Cf0Ho;ltRJCrQQ zS7wxpT82;LqXqAy#p8-}IpQR@4aM3nQ_8h^H8)bTAbpN*w`CRQ9i$-UDo7V264v}y z)S^(oPpNR1qET2&@kVH=&t2Ya;j=W0mfhs##lip{(M-MgKHJP$DVD{xxVIvi7QIP2ZmdSumr*8=ud@xClh zvWjR)r#g>oi#ZbR0S7CKo_k)1$6pT8s=sTtd2ReFWN}JsAY9y%11qs(Qdqis0h5t& zS!DKEN}tp0mOSws^27>&IsNg9KEZ#+=`cPMXD*EU>uB zi$W3k$+$iz`MY_yICg5tGv;0m<(d0s^8u?5l2+-)=|Gn|5xQ51SjDbzK8q5??}=p0 zVTX5Q_Hg}vDrm_pxfO(BNCbQ`3eCpYU(K^fPfl#jOFKkKUoHpr&HT4jhJ&{j1-{G@RJuDP|#pjeQy zA{W1p=BfRBHRek&w{2A?9(6w>0Dk@bdm>NOnrg$Y^DlJ8@-O$}*|v^}o$znlVq>d3 zm4diI3bNhx%>h8F7q|DjnVnbi%R4jG#>tEw->Gc#xN)%<>8^sQ!c_Ur@|W=^=?sEz z-bMLb60Le?!)h1?t2-P*f;SCPh`(BwR-d~aK^(=ShN;N2<8|X#%1eG*c9c*@^KMCD z7-k4!s%RgKRkFd`w|A}>%=vhbx-q+)PWv7*3vaAxFvlIpZPUpgDZ`Qxy%A+5xTXOR$_CZz3yBdy44rsogC^`y#8{YGC)M)Q+BSvri_4FW*+f^Q zWOP6c-OCf=MI?$2MDXp%A#tvLNZIkax1TP|3GmKd8mJN7Y2`>e|lePXE-S&<3)?(`V!s}@QC z2q49yIrNr^Fl^Q81&uZl^d&~Kw2Xi`?K{|tpR4N<>AHi@nL5~F#nrb^ToT0X!H{rO z2L*Osux*~Wo@2*$Mbl}@&;}F7#3UT-!#ql;+$OYYMF4dFpdk&i@JmPP1PmEn_%c5j zBc8JurJO6LF@Oz2$sA0j~eG4v2%lC^=TTX;f^UzDX`k-YUN1Cy@C zLP!79hd?jiZUm~Pe$i7954^~7{X>yWud7!0+IrOfeelcu+YKWx@IBem$r@jpl~C-d_g=I7k`wp zCEqUvg^p)b1FOoqGpr2&m9KJ4K9gCodW>o30awC`Pda5m?t`1*73mvo83ChHjd`Mb z6Le42TKRH|cY=ptBmhduHrA12eBh@y8+>q3yOy*!S+0Tg%G9gtiJtitsuEE7e?#2w z?E}?R`M+EC#r8vwsJN5+TE$R7(x5rng~ifYPr)$hsjQ4*&{~3PMa=Q*RPALQuSW7C 
zs|-kpsE-!#M@}@Xb|H(IcDY32qV}8|o=x(M(*g*wERD8Qg(1UB1As!jZU{9lA+vl^ zcj|hUbx4UyQCy!QL001IY@r_|NRNn{>5`{)AeJlXfl|T%WM^dGsmqu+h}Ffaa@Yyf zL=RTLiq{N>JBuKzsnWicu23y+E*?#uwIZO2wwP}|Wv`UF9kCM&?rqwC75B9;--TCD zFt!CD;c>oTU)uIRqI#wWvCir|J(uOL_ze`xg-oWP42%90Qro<4(hJO>;OeXHHm&Eg zRD%y$<3awGE=AFl=wo)r4+)ImC*8%zM%du0(OO&pqm`#Rkqq?Y2HR7-0pWZy3vfjR zwlX{w`<%-=$`GBO0a%(~l+%KL^|j=-ejs7IMg@%l^QBf|eL*GM-1Q?`A|$-vUyAR4 z^abxKK^T9lz=NE$Bbar5V6Jjgm18qz$r6k2&j85@0nfWY3RWE|5}5tLLq~WYfpo}X zLpZ5hpyM}Pbj@}w|3u_UkPKPicNx(JR3ncJ$ zXH^26WjY2-tX+WYM{@#0=zvjtnGCc{n*lM!#0roc009lTf^hk60i|_SE;MNh%3DIX zOew(HAt?xih=OA)6OES)?$7&mFPJ2fOa&;U=mMg`D9`sM0{p-$+{wh|J84WpGk!l0Hxv@D1zAu_ z>}%js${)E)u;I1rNqB%pUnk)BKxqgHxcMmY-8eM?K`=K{>=KwW?hCqYj1PHpgt{?q7-~uo@Ei@rOsFhfR zqlWxMZ9vyy=D`Rq)u0%Mb3I8(Q%?hgwW(x!gIM^Wfc_6`eoBo*lwnk`Gns)1WZ^p~ zprPuo{8$S{sKdBqV4=sFU6Bd6L!|Xx=Gmu!G1 z0=yuFTT`e2!hB);(`?-PUbx)0qk1ym(7@a&E-x{`-_8>vp&7J*(hunmSntUy#X9sj zo$bPK>8+rM`$@Ufg0rK)t#EpxDoP>cR}lD z;n8gY_CA$ci=JHjm>8N}GiUTsgu#*`5PA(hAmm7<3MdVYNeB!!!JH4bmww~ z^871DkQC`-tY##a&%+=bwA5Ku>7dqc7XX38Jg{>K7?A!Hc-!1(sJhXWER&yD0(Aus z`~s4r+O`E*0p892v*|9n0bI|pYQJcKPp(AeI$X&n$9#r=NV)4EWWWp}tQ-hmhG7g= zW+RB(U{+;lfX~l~_a3%~hiUeKjx0Rv!ZXZo0~y^Dqr7i|Z8y#0V8-G2n}xA$DjE4% z79!c66id+^tcv-1e7&&^eeo=lsV9-)I#5MAqfKBwku+VL*xa-@P8 zjRwX5rIP7vqPK@^Xo^G~roi4}h=K(G^bv^YylSjZlpU3(g@ijMH_^1*wM&PySDu5k z^lm6n<~5jmybn$I`rR1!lz`YEkO8I!sGBsMFR}T*K-Vu=zIFJTJ|aDVaTsDF9xJYk z?IvL=0@aDzmalUg0~oiW=t4nS+X7R`L_0_UnK zU3sDinBj~@dU3$c;wfn(EYo9-rSAo1G4&P)>a4^h4DY5Uyw*L@0_IIe2c9x}Tm+1* z8z2@1DY^RYC!%sKX02g)IErGbHN9VfWO1}i)`)_fu?=;Px?HxoIb6c&LDfV!X&uvv zH-D6jIb?7-DqRTV%+yS&>;4|kX1|Ix8Zbm-dEzTS1}eJP5#=twc8*^9XoeXnIVaIGPL8Q$`2v@UW< zZzbqPuq01tAp!<0b@cP33>s~wWH}a#FD|H7{wl0ZH;tUg!Rs|z<6$_};6P@go7!~b zPK+g=a{?t5Kd-HSwdbXQXSL}fvU{9lvT#*SbYPGK)r{iS-2R8YEA!nKG1)cjxuI1I zg#tQOuEr$-llxwzMaAntKBm5~8)mw=1NgK~MVyQdaY0@GSK1A^**Ch}C_W3p5|&J_ zL3g@Z#_g%sN#QRCTVk!#MGFJJmaojQnSGGno#00w=F*-)N^89|g^p4411m?paW7S4 zZ>Bdg>pi+_Qr!Y`laD;(e{bl9vrFX|8c3rail*EVm$KUxh1w5E+ku2mm!+dDop_FU 
zI?WXxcr#KdbdW_^>#_L!K16)rkJ;4?gz1m(t#pUF5M0t9esu`$+b#BljhC;6YVQnN zm0Z*&*c7I!OcIv+M!`|)lkr<85oTf3g42tJ*RLk8<@+ zGG6E#`NrZIeizl@Zj?1i&A5^n?Fnenx~oh$#|8hDVQdV8L&-q%jV8LA@P+l%RP}eZ zLV}NSc5Ab+35PwL6Um2qp3)u{LCSm!nFX;2N(@}1l8cylc3VU~NAFzSd26Rt3?ZY)) zgU@*gYA53kxDM{5Z+AR*H9iVt54fp2O9S@_!}pGxL;x=l{T@6T7tR*n`yAAE*o1dY zWI&!G%u``e+F@NQ>qaJ2J#Aw8Rm5z9vy2{Pi;DT$_k7Yfdpf2a$yBCmD6gw3(+Y@r zir&6m4?p3z^B2QZ7d+^|Mf~av@qB2{c+6xQ&_{XN>RK6M8UFd!Uxa*lT{;7#W6hOO z^M!WcB`?@U#<>z9e|K9@^R>#;U{Z4{EkYChEI@aNK2$qGG0o;=EYE!*+5@@SqK8=F zuD(@pOjw;qO97qN3-ho|e05a{hJ=VvpO7$z9I^$q2%zX2*K-tn#0jZS8bC_03A+R; zJOSk|42)Jm*=4qQF?^&)%AiAxY%Q3B>YqjK2i=IeDSi2%of6)Heh%HQSRB57nMc*vpB7(XgfuVqy0qQg> zvAT;KFvB?GB-brIB#s#zvJLga{qwyXRo55pSHX919keX$@wOTaa()p;+04zFYd@wm z^af!WLGek$%h&kFq?BZQlpovu!mI&ueItQk`17dgm?fS+=uUJeoJ28G_KX(1I8#;f zwj3&VmuLVOS1FrTK;0_@p!6LZCW>nkEmhtUfaXOt@fSirSWDD_Z@{gxv05tc_yd;+ z^1#A*AKx&?R$9&-Z~`iUP)#e^6tW1pz_Qa})5COWyj1;U6t)s;z2@S4eLaC1nB#Cd zD|novXtz*R%GhqhFmru+BOE<~Qpxwf>9#*gY`$5~!?%Z+3?HF~ue>xQDWh^r|GdaN z^x_4Y^U)D3Ie0ITY@Uu~J=(ZD5qtUS;5&0^m+?Vi@$f_fRZUCVeWEx~;P!5A*?1^) zpgZgO*s@;snQE}W0;CuEush=G(MIfdac2>WFvHSy_07A+qG}`iIN?89n&(b8#j)-A zk+;Dq%z|;$hk(?Be?il8jt6|4Dtwt!xkMCDDH=?Y^vm+zdxm#UMA!VXPNVU%g{nmU z#KL$`5x~6>Wrr2NI6LRPyZHmIrrr{NlnMSKUPrzf$*E7my4BNpO1$X|(IT@VmCX1! 
z&H2Dmeq$v*g95LQW*6M&@qR?{?rbafDF!J+^ngW%D>SA0adGXYGoVx%Pp0hG*tpFG zCFttS=K>2|^Kq=WJnS~oGKIc{ioG*OifdYTqKlIxloKe3aBub0Ja6J7jJpZnww%sS z-;5|!6kWm0EPLK{nc0~Gm{e4#_-w}(sq{82XMYZnsi3y+51ZZfZ*%p;StKZLb9xKwS;*xlt` zQ5!wGM&rCic z`LiKSG{_eEM`nS~iic%fl1y{J)twu0fk*xMl1JpG%AHSP!JCj`6rfWrXTkg z)HxOxssyzfo^Se;2b~$;H}6;UI^8Fm<; z<$hWzy#DlyIJnXkeoYQpMfZC{5mn~Rs%Rts58d|{|B7?nZZ!=}pisAjbL}(+{|rqV z^lu$YfX;YpqAcRuq!R^F8TNaPpFO^XqjwbBC8#yessx=fcTx%)!rYpK z0wm9ar0sF{3vGtGS|%ubn$4t6%dV)Vh8ra4Q#Rp{szNAXg$^f+$`ugXE+h^x_H~PDL!27-~3|B_n~2WgPD=!ZDjM=;SV&#b`Io*DG8uqrrwCN)#k3Ux2qID@9_?d z8pVt2w?jk3J=W>DyW6LuA30P6iShjj2QH&a{l2k4?=w6iAh#G1FH#G}vH=qZ`FRJT z0Tb)`6ZUtJS%WzQE~=uMjn7A^3A%-++N@F2N20w6hztsXx0d(4Bq!&EMt_eXJpAT5 z)UV4{Pdq`2byh&eDO;D^6ulmJS znM}o-1%`S9`+lhasSUAj%G`p1^T`R()J?b(ng@`Hi#k11)5fwPql-!GenYr%sDPHoY+~UzF^gDGihdP zQD5T;dk{w6*o0b~HYrajGU>!vg{Z(QemI0k^m|ODHgYd4tO|D*4J8lC5pQ?xAxh=- zEtcqKh*tKEPw(bOV8{pD4Lw#{3JV9_fw!_|4sXBDeG3Ya$I@|#XGx6-%I}2Fr9?W= zlV+7sI73B-@OO)7%WFZy+N-=@6ciOyYx$kN{9dJ;sN}=ZWRj}9C0yq~`4zTwf>i z*20m9*>Tu~wpQmXn7p7hpPhXo?NX4WO<{aOZxI(p6pwcM!+4Fsi-Fm9g#yF}Bb$xw zosKWX#TF3C+ek(y;uW>pWhkBATn=ObI?4lvfsrp0ftfNU%GtYcDV5gfB31{3{kQ4@ zhhqslF9%Gl59Z~%Lc?1E$yuB`@wXQV;+Z#9cQ<=#3YA)(UORLu3L+x;7nWEnoi2XF z2oj5>*q*5Fslu)Gaqr-=Iw6arG79S%U>6ec%K8H@Yo zg%mnKGO5wxJQS#Mf7;u=q4|(Vu(k5xSn>At-qHE4(0AVrJR(k8}O^@wv5_B#GkOgq25@ z{Iz*#Ft2VKEi!#Acy*XEiSF;hU&};Z#F6o<5J#V%9X5+WKoVg)ad%##3I@Dn&@{Re zAE3{Mi`?_GUVzEq*m9IM_9eqz?^X079`15Pk`V)&Bx%gzH!*hBR1q}JT@d5KRp(Dw zXiJ$F5y|nXaz5b7L+v4`|K{z)zt&R+9?`2*6HP@yIwgK|OqRGA{FZ{ne$#5WIvyh< zPEIk2Ii=#r@$f={8U`&x$SbnEdDj@zgs#&a6k?SQZWz9i=QN zlPD!!5~NzH2zt%%RV#-MyoZiR6<>=P#6}ovjwJO+4yJG`N7Z1gGomt%Rz0vBimx&E z(Z~F*&n}jCg2lN8@^ZLTI|H&t_bK`2Y$o{IR@hVqwbJ&@9_8u1GMG;VhrLxu(i;PT zVJb%C#L$#rTL>@qgFJ0 zZme*?J9TjIJ6C@5x(LUn-5xl0a2IkA@`W=VJsxcpYB7=+<}%X;)l?Yg4%@s| zl8Ete43LNAKi!UcbJK)QdRY^s$EtEyqor`6bF?r~?s~k8aZM`$OQ*j=g7pXUmNaAD6R#ga>#0D!RqZKJ()@3A|4tnum z8z8}3F&Ok*W&Y{8Uap#IPD8~7Wr3FYs$nu>{_;j_QLJlwGG}zHWT#~OnMvPSD>hhD;^jxhY3hy3gqp#HgnJ)yQcBV(2nZ8+Lx~ 
z%`)LIRx5!{weg)VJySXbDV7}ft~1Ee%fUQdg@A9_YSL4l`yiLr?5cUh@~R}?{M-B* z67P%@g!4wxiI@|;G7N?y37J!lO_OQOk&x?n(LrvviOQ}nqtJSD1lZKe7Dv9SSfU8ppCn_aY zN7248^`?7xt7cwYM7d=&Pc$E-J-B7Mtv{?USm-dPIHZr`xWDv+|7L$)X%BVB)gQZR zaXa~Sxmk|m%^nS*pn0{NlrTp92EhQr9OqWd;-)y6O zr0+bY1!_yNqUZgv-@H1u;54gr5pB#i7Fpiw=fz;`$w)~@?25^Jd4z-mgQi76jlxe- z!I}=a7lkHr81(@N~`HRe7t zLXla^J{}BzczU+P_N=((UXTs&WQ0Rb*6YzD79uAP9ww2gQ}sFq@|9G|TLG7^I5tz5Fq#HRirC!3jOye&)yK5b{EQOYq!Qb-65Es# z+l(_JiEUq9o7Xk__-E zQds&hn2X6+zwmFOk%~hjD}h0^_=7qK^x$`ko3B=$-DyW=) zcM}^m-J%vn`wseJ%xO~S&06hwvb8Nj`6=XGtpiCkZta~YtSPzen}|{YZB2^N4)vNuZefe=O^Dn zK0i>oF@~SGA2UaX&Qtalfj_+P2-4u?TG4d9cwnD;NpCdW784divFUX3UX+o4Vnk2% zcF*?ewWzdU12-ygV5C~SWvst69Z&y8+rRyjHoS2IyLBgw;BNGwvdPhk@(5YVGOPnf z%IoBKUe*}iO=D%pJflJ~I&nBOyqb-pbH|a$>A_<@^(E!yf!$0obVK1(iK|fQOfYr6 zjg7|I96DpAl|=T}_W8M*dpw@&)Axzh-yU~-PBe<0A)&?_TrWH}i>wWcBIhp(3oQi) z+w@BoIP*^UTn4K4`*9K7L)ly3^rJ5B@=es4NPq6QP+L*Mrf^Ukwku{&R^0QyhV_6^ z4_Tq@oQ22kPpsqGBzCB{O!pU=Io(V3WWp2z6L1^cne%9k+~Tl7C17SmxoC!O6s;eL ze0vi3#?CIsN?-v)|G9M$k@kC&4u)CPlCy{eb(~}9cD?^=YuK)K0!2z;5#eF8QT@ne z_wg3{_;Pi7=)!=Qjs_*c+SR++!Ps?#{ya=26+55s&tG+T&{PD@mKC0bBrAg?^RC?_7G-iEELZ~u zPq>y4jBgr3Uz8%Rs0SVFj5QehGVvv4DY8pZOt6ZXBSW>--Vg~(9qX+O?9@oCOSKFP zSCL=BXg`- zcv^C3{adf7C0!fm{n>J>nSh-`?eOanC`Vgyy(>haa}j!C0=u$`M<|`&ka-7$&Ta!B zrf@s8r?|m8!I7J|vSs~Wc^xrwD?Oo6yj>UvL@elXesa-mejCz^biHHp**@UmZ!FuY z$L&Q@pS#SMc{LswK-uVwB4W4eBXd22ay^S*eZd6j+ySNUMp6q&A`F5^mU98OPr|}? 
zY6iujtI-;H%TwT#H-x7v{|?k;VRSyQ5)QRq;mQe!UuqL$%R%)7NG<6Ho@e)!gE*|5 znwjN3iWuZ)hOuwdszb)5jx*jXa5&+O;t!JxEfB*Ycv)@Q)SB^RDo&ejTPN%|xA}06 zW$d6oq0uC#7S>N6`5nISy1tu^GG4*H9N>7sJoTVGtiz@m$6aF|#PF#f^cri_nv<=2 ze3u-L3?Unds&A?+hQ+Kr8LG;5x+<=C;lKQfspHyvsVy4hRkeOsam!ISlI7t%xQuMz z6zHu6S)cM=v}D{N=2k3!=3-G;JXi};7*BbQpv&i@U&=~gdL($&MrLPSYl((Vn<7))b>WA`UD{bU`A=ck8rcok=C^FC zj;#xhM)H{%m{&6;kFd9q{QC=!VVB0eD5+T|_dW(_#E7lhQTve~u%!Y&1?(`5<$V>z z;+t*MoupqD(Ni#unIyD_mknN$cqSK`0oWHlP*rwr^to2%sqWACwIlG9W!@t+h?8F% zn_WQq8`-^`q=yRO?;3xicz%_=Lt*C05xqOQfO?Wy(ubI|dr&1Awqm2(YB@i^Xk>+o zFNn{`*t1I2Y>zY0h<4^AzUxM)91n6kAQj_T$IPIqgqnTWct3a+-W%srU=7rU>hRvj z&K5InFn7IQkNsYBZxHPvBilrFcZ^%-^I`e6#fNlM_)y4q$eE%?!cOjz%dg&B`gL<- zAx5xwL7IA^FyET16Q+4`UvByoeco%74~QN;S(6f-3_Ojpsi)v)e$3*wKmJvR7kqe$|7nGj$iz@p9LjERsL!n-rQFp*h~ z_|K?4raghq+#x~S(5jD&Og=2qpb37_q&T=&YT@Z?(UKG(q!qA^^w*lYdwv0m#u_zi zlbBxfp#;WtSHKj|z;}*Vk0ZIejM#E^N$sWN0%^WP-`b)(b8)WWTC;=u0`tFZQz`pX zz;0H>Um2EFoNc1gGoZNwkV3%IjEKL>mAZ*%eMq@h2Yc*x^%y zNCKaGHsG+uki{h|bw$lQK#@}b$SHz-CF&ML)g*Gnq4~hC{q=m4^aI$Xp&>R&!YK#0 zIvQJv+DD_w&N!f`?bbWytBL8vb75n?KcX%cC#=9hy{^e69@rP*Qn((82C0Bi1x|!m z8VS9Vu^G`a69zG1y9yj`iiM#i_XX7gLsQh2;UM0@T+HvIxd7Z=AE$QcWpK>{qO=Ay zDX;@RXFlIV?(KxPY%&<#a*x~&Q67OOK_IuvN~DOQUzw)A3HB6Fng*IYbW|MbRKau2 z=xqqmvZCEnVXB^FGx_&3o<{_mc0kH=GXFaS#2}qZCW>Em!cQB9a(o1Qt%5fgB^@2?QnFYDYO4?1iMjwVH68VWhViH2XyxT6Dj|8zq*yK`r9CK7YTpf~Ew)%-I=65Of%5lZnvtbzi8BHzTd0lz+o(JRbkhC$_@i*RSLmNk}GTI4xK z+$Pl{q>nc_zdyH#hcFyS5Q=2zkY|^{Lk13Xq~XY*9IP+L#ogEm-DZaBZlfSI+V#2J z>gEqX*aYIeL1CEO5kYGHsH1|#z!IyJSPZ4-L+ZzjZECvh>#{#J@f5o*?}|pohmAo3 z730%l5?&(ohj3E!lV{@StiNr|R@Xy@y(CP1w1=xeX%wm6BhHcrU>ZyTz5!R2F^%}` zb%|3!(0f|IZ224F!9jlbm@DtJkI<+m!u_(#wn6j_3AKjob?dJLffPa&Qh@>9ll@#A z$}h&^QVsc|7_F@5Aajbp@U2j#)fRHl7P6Iyj=A_bHGbWv zINWu-Z4tLHs~s<=fQdKz^}W~j7oC6C8+ZuOXvk$aK^Wpty`za?A8!w#xnBy&L4k*3 zerk+gsh9{Q=^L_O9_eP?JTFm$>}T@PDd*N)h&X@isT;4pNSpr|iFM5zB!Es_4rh1K zlK5eyE_kCe2wY?QtD3Rz!kWH_%P_p`pos!c&8KqTlQ#cu3qt9Q6eqXA3U=7&vw%b`nx{&zA;)SBeKw7->`^EWmhT}2L6EY_!&Rp?C 
zIuN<)l;78(WC5KDGnw`E_Zl0HSGM1FaA}M54R=&?HLeJuY;ArHTM4-4qycODb;jNs zX1e|J?RX5cvSV)EJF^CyJMT%sUq>>u-)6u5B+USCp}jxZ zj52_g8D%(^sEMY8-(wrpTQstGW4Pf`J&1%US}DU8?t_NM02nqK8jgC(-Hmq3>)`WAm6CuOE@5rwZVvO3Cdv{v*9h^C|LyPWWwEC8j+&Ii zkcGRa^Y$Wwt1#`4Js-3v#Y)B}Xe%M_2)WOVXQ%8w7=M2cL+ zyKvl*ktS9*cud4o96K|@>3<)`={{!!TJg*9R$O~ZPnnQHHIn>f}% zO#NTNFb}kvdF~YLzY`~}t5QVRyD=-_gkiLkXw7-zy;uE7j}4ADh7RP0ez)m8E{N6p zOY7QkfZLlvrq=93T=wpR?Lz$W)9u=@lvm>}upK|AGi;x&U9Zl87UluK`H`q1mWbW( zJdgDl02kC3tls0O!@#++&2{DfG9>WA+rGMP&0DlG-$0Z@1z@Tk-M1f~jtqPG)+Ul@1F?i$F zrh8h`Cs-JsW9{y!^K}|S@w-`KJ`W1-Bt+Q$5JmXNd$;zsir~tR6_l!S`MjarNrXzv zaw%MfbgKzk5>K|l+nxwfwIF=s!`I9_+dY%Z6RC}+*&8v-rX;__@6IkA0-5{p((~Ld zx)cG;^Za1Lz37#SUHeIUH^ZrN>b4&$Q6M|;lx{9-(G=P>;xE>qjcZJxZ5QE>+)8PrvYES8yFudwKBevYLN1^ZZo!ahcJ;SK)a;+W%c>i5UY?Vt#LF- zVc0GmW`C|boH!_3>CLYtWsWS*n@a(4sQZzPdiL4EGB`Z`=8;6Kr%fN^8MRQZTSq60 z%>D(&wWelvZ{axaTpwLZH5PT4zU9802{fbejCuR6YlPHCsi^ZRyis^pe&NOG3RUbC zrb1!SN#ETg!Q0i9cbn)_>wP0gW!)GGFPnSss+Q1(i)}5_JRcKLJtNVGyK)|a3m>1o ztG(El4C%yNaIKJDn6oj}K8Yp<|6#-yJ1YjzmB3QT+FkA5ZBaAe~2n=##z5 zWGsYiFZQjB2ET_F#4L_z!%7*bgFv8YBs&I|&U6zIk&SrXG~g5+Fr<-*4!WlD4s&AOfjEm~22 zCQxDjRl&%2>5WM=(qVZy%8X~Et$0*IoO!^R6h|y4e!rpv=-6IObZs&KI^{z4i>`+x zy3qXBfRm>_k8BxC5^0D#RKt9hz_V-o}bkr<%}G7Ved=EGK_$2a|{`aLQtW z*Ujj@Jdhl@k=`05xJyEp3o?=qEC$}K&?N9HSfw8buYn%dmh517-9KYYLz!5}5fZ=q zT+Hu)(By>5e-&_WztfMj7-0rwil?K(X6V(^VQDns2+uh_`v^-;wpmLI>{fxB9nL99 zFD8S6rM~EmQs2WMIq5ha^i!6vP&?1+`K!HGv&e)t90y!0!`&DjoP*IB7t*5B+t(N< zyC}!P*pe*ErjNc?@frVvIw{Dc$o)BfLu;RFnT^&j&M59*kT8EkZp(NL(s^G1OWe^a zDUlmPm}kZ3^C8{(i8K-{FHHq{w>b9#;)ZbSdKBo?t$j2cjxsH;fBN=RK%{tZ;!NV5 zS2*3pzg<1qvVXqWp~S^i*LIH2-J4+*qL=&a4xJ6d5){&X3^{6TXPsMHC}ai4Imsy2 z{ITeRC@~bckDzNCJ}yE&hCUs`!(LDtlXM*>gljI|zj)H&MC0D?eazerp*cazNOpi& z>K>q4o8>%ueL_li9XJ7=`NN(_<<(-eVJW@PUi67ti21}C&KHQA1HHRA{e4vVopHH7 zYL=hRw>|Q~a6%uvHs*;F4XF%Q34?_aifk5zY?>`^izngtr=VkolzVe(yyo;h=5P3d^-80Zxfcml2 z+sv4ac|vQ#&LvpT^^Q=az;iJeB{Hs2uxaUq*D0XU+~4gA9#_PEyi@f|+1pyhYE=cI zsJsAJu@tuF+%Uz0YwD>0FZ$G 
z*nIvt@C7E2&tE{F74Nf(h>6ik$cW0)8yVXgTN@c$8@lOQnps;o&^fv~?f?V+i++m$ z|N3_{+TYREHjcFJX125r`XcZxWFSHJ;`8eG&qk5{j*^r5zs8-Shh=&h5Cbo~ zLz{(gq0>Bpp)^Pdg{gkKElZ7guz(^czmxP3fECh6ZoXn zNPN{zO1X=d3vN_kuD&~BU5eo4dGVQZkB3SvpN7ZnJBxZMCAwUUh!yIs?&Am=4PLeD zTYM z;FmWhqWQ>kSckX>%Kek}JKWqY`XK(R=^u4K4yptnK2^a61_1u_(|>wIQC3+&NJQ~Z zM|edG{1Hgxp+ic#Kh#QD3w0{52&lSFnp>hgwc1#AnY6IbOgvvHKEZU|r>4Usu0mJ` zGLiPikv7!DI@0UtcM_^u>fLsjWXtXNPdKN$>=Bq!rlzB#g360~I*^M&tAI#HS0okh?KP!${ zNTb5~QEV0b7I<#(&hSPg4&)YwMSMKGB&I#v+ig%+qSho)(==M>%`Sbht4{M+U4uVX z4k?ZzWOGV3J4D8%V8p7DCUOMd_Li^g3$xk?qIj;<&IK}af!_b-e1C*Wv!DgQdxBmDp5HKpp|M|Ybr}t*4!0H!$0^dLDpMU=< zd47M+jPxCiogB?9>2-C@tj!#Cb?I!~{?72n1n@{8Pr6Sp?)L4L(L8^zyko6f&jp$9senx3W)uUz~0zW-_gw3*vQz@QU70rFtIk43i<5D zRrb%Z^Bxj|5#a|BL-c!`7eoT|)5#M;jaf==&T?f1>hVCC@MY_8(vuGwXk8 zTLV@CsP)r_9Y3}Iryou(JI a2KMnA*f)8~}{!2bhO#N~nj diff --git a/splunk_eventgen/lib/timeparser.py b/splunk_eventgen/lib/timeparser.py index e5f32b8d..56abbe10 100644 --- a/splunk_eventgen/lib/timeparser.py +++ b/splunk_eventgen/lib/timeparser.py @@ -1,18 +1,13 @@ from splunk_eventgen.lib.logging_config import logger import datetime import math -import os import re # Hack to allow distributing python modules since Splunk doesn't have setuptools # We create the egg outside of Splunk (with a copy of python2.7 and using Python only modules # To avoid being platform specific) and then append the egg path and import the module # If we get a lot of these we'll move the eggs from bin to lib # -# python-dateutil acquired from http://labix.org/python-dateutil. 
BSD Licensed -import sys -path_prepend = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'lib') -sys.path.append(path_prepend + '/python_dateutil-1.4.1-py2.7.egg') import dateutil.parser as dateutil_parser # noqa isort:skip From f535790098b4c1c350b15977a374971985249796 Mon Sep 17 00:00:00 2001 From: Tony Lee Date: Thu, 24 Oct 2019 10:42:15 -0700 Subject: [PATCH 30/53] cleanup memory (#326) * cleanup memory * addressing comment --- splunk_eventgen/eventgen_core.py | 6 ++++++ splunk_eventgen/lib/eventgenoutput.py | 17 ++--------------- splunk_eventgen/lib/generatorplugin.py | 2 ++ splunk_eventgen/lib/outputplugin.py | 1 - splunk_eventgen/lib/plugins/output/httpevent.py | 1 + .../lib/plugins/output/httpevent_core.py | 1 - splunk_eventgen/lib/plugins/output/stdout.py | 2 +- 7 files changed, 12 insertions(+), 18 deletions(-) diff --git a/splunk_eventgen/eventgen_core.py b/splunk_eventgen/eventgen_core.py index 32d3763f..b3eff4d0 100644 --- a/splunk_eventgen/eventgen_core.py +++ b/splunk_eventgen/eventgen_core.py @@ -263,6 +263,9 @@ def _worker_do_work(self, work_queue, logging_queue): work_queue.task_done() except Empty: pass + except EOFError as ef: + self.logger.exception(str(ef)) + continue except Exception as e: self.logger.exception(str(e)) raise e @@ -279,6 +282,9 @@ def _generator_do_work(self, work_queue, logging_queue, output_counter=None): work_queue.task_done() except Empty: pass + except EOFError as ef: + self.logger.exception(str(ef)) + continue except Exception as e: if self.force_stop: break diff --git a/splunk_eventgen/lib/eventgenoutput.py b/splunk_eventgen/lib/eventgenoutput.py index 1290e38b..b60e4f9a 100644 --- a/splunk_eventgen/lib/eventgenoutput.py +++ b/splunk_eventgen/lib/eventgenoutput.py @@ -71,21 +71,6 @@ def flush(self, endOfInterval=False): Flushes output buffer, unless endOfInterval called, and then only flush if we've been called more than maxIntervalsBeforeFlush tunable. 
""" - # TODO: Fix interval flushing somehow with a queue, not sure I even want to support this feature anymore. - '''if endOfInterval: - logger.debugv("Sample calling flush, checking increment against maxIntervalsBeforeFlush") - c.intervalsSinceFlush[self._sample.name].increment() - if c.intervalsSinceFlush[self._sample.name].value() >= self._sample.maxIntervalsBeforeFlush: - logger.debugv("Exceeded maxIntervalsBeforeFlush, flushing") - flushing = True - c.intervalsSinceFlush[self._sample.name].clear() - else: - logger.debugv("Not enough events to flush, passing flush routine.") - else: - logger.debugv("maxQueueLength exceeded, flushing") - flushing = True''' - - # TODO: This is set this way just for the time being while I decide if we want this feature. flushing = True if flushing: q = self._queue @@ -113,3 +98,5 @@ def flush(self, endOfInterval=False): self._sample.name, 'events': len(tmp), 'bytes': sum(tmp)}) tmp = None outputer.run() + q = None + diff --git a/splunk_eventgen/lib/generatorplugin.py b/splunk_eventgen/lib/generatorplugin.py index 1c190079..a736ee2c 100644 --- a/splunk_eventgen/lib/generatorplugin.py +++ b/splunk_eventgen/lib/generatorplugin.py @@ -37,6 +37,8 @@ def build_events(self, eventsDict, startTime, earliest, latest, ignore_tokens=Fa """Ready events for output by replacing tokens and updating the output queue""" # Replace tokens first so that perDayVolume evaluates the correct event length send_objects = self.replace_tokens(eventsDict, earliest, latest, ignore_tokens=ignore_tokens) + # after replace_tokens() is called, we don't need eventsDict + del eventsDict try: self._out.bulksend(send_objects) self._sample.timestamp = None diff --git a/splunk_eventgen/lib/outputplugin.py b/splunk_eventgen/lib/outputplugin.py index 663042dc..fbb72c8a 100644 --- a/splunk_eventgen/lib/outputplugin.py +++ b/splunk_eventgen/lib/outputplugin.py @@ -42,6 +42,5 @@ def run(self): def _output_end(self): pass - def load(): return OutputPlugin diff --git 
a/splunk_eventgen/lib/plugins/output/httpevent.py b/splunk_eventgen/lib/plugins/output/httpevent.py index 704e26d2..e33b0eb0 100644 --- a/splunk_eventgen/lib/plugins/output/httpevent.py +++ b/splunk_eventgen/lib/plugins/output/httpevent.py @@ -77,6 +77,7 @@ def flush(self, q): payload.append(payloadFragment) logger.debug("Finished processing events, sending all to splunk") self._sendHTTPEvents(payload) + payload = [] if self.config.httpeventWaitResponse: for session in self.active_sessions: response = session.result() diff --git a/splunk_eventgen/lib/plugins/output/httpevent_core.py b/splunk_eventgen/lib/plugins/output/httpevent_core.py index 56eff9c8..e54fb869 100644 --- a/splunk_eventgen/lib/plugins/output/httpevent_core.py +++ b/splunk_eventgen/lib/plugins/output/httpevent_core.py @@ -207,7 +207,6 @@ def _transmitEvents(self, payloadstring): headers['content-type'] = 'application/json' try: payloadsize = len(payloadstring) - # response = requests.post(url, data=payloadstring, headers=headers, verify=False) self.active_sessions.append( self.session.post(url=url, data=payloadstring, headers=headers, verify=False)) except Exception as e: diff --git a/splunk_eventgen/lib/plugins/output/stdout.py b/splunk_eventgen/lib/plugins/output/stdout.py index 54734b5c..8471bc27 100644 --- a/splunk_eventgen/lib/plugins/output/stdout.py +++ b/splunk_eventgen/lib/plugins/output/stdout.py @@ -11,7 +11,7 @@ def __init__(self, sample, output_counter=None): def flush(self, q): for x in q: - print(x['_raw'].rstrip()) + print(x.get('_raw', '').rstrip()) def load(): From 7e3fd76faee1b8f922adb956dbca28afb713dcfc Mon Sep 17 00:00:00 2001 From: Tony Lee Date: Fri, 25 Oct 2019 10:21:39 -0700 Subject: [PATCH 31/53] Fixed standalone healthcheck (#328) --- .../eventgen_server_api.py | 20 +++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py index 
d9133d45..01589aab 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py @@ -34,7 +34,8 @@ def __init__(self, eventgen, redis_connector, host, mode='standalone'): self.host = host self.interval = 0.01 - if mode != 'standalone': + self.mode = mode + if self.mode != 'standalone': self.redis_connector = redis_connector self._channel_listener() self.logger.info("Initialized the channel listener. Cluster mode ready.") @@ -433,13 +434,16 @@ def reset(self): def healthcheck(self): response = {} - try: - self.redis_connector.pubsub.check_health() - response['message'] = "Connections are healthy" - except Exception as e: - self.logger.error("Connection to Redis failed: {}, re-registering".format(str(e))) - self.redis_connector.register_myself(hostname=self.host, role="server") - response['message'] = "Connections unhealthy - re-established connections" + if self.mode != 'standalone': + try: + self.redis_connector.pubsub.check_health() + response['message'] = "Connections are healthy" + except Exception as e: + self.logger.error("Connection to Redis failed: {}, re-registering".format(str(e))) + self.redis_connector.register_myself(hostname=self.host, role="server") + response['message'] = "Connections unhealthy - re-established connections" + else: + response['message'] = "Standalone {} is healthy".format(self.host) return response def set_bundle(self, url): From ac58f6b9b860eb9ffacb72e552533eea42241eb7 Mon Sep 17 00:00:00 2001 From: Li Wu Date: Wed, 30 Oct 2019 01:04:33 +0800 Subject: [PATCH 32/53] Add python2 libs required by perf test case (#331) --- dockerfiles/Dockerfile | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/dockerfiles/Dockerfile b/dockerfiles/Dockerfile index 708bc244..7a99dea3 100644 --- a/dockerfiles/Dockerfile +++ b/dockerfiles/Dockerfile @@ -4,6 +4,8 @@ RUN apk --no-cache upgrade && \ apk add --no-cache --update \ python3 \ python3-dev \ + 
python2-dev \ + py2-pip \ gcc \ libc-dev \ libffi-dev \ @@ -26,7 +28,10 @@ RUN apk --no-cache upgrade && \ mkdir -p /root/.ssh && \ chmod 0700 /root/.ssh && \ passwd -u root && \ - pip3 install git+git://github.com/esnme/ultrajson.git + pip3 install git+git://github.com/esnme/ultrajson.git && \ + # install dependencies of conduct2 used by perf + pip2 install filelock twisted requests queuelib ujson psutil crochet msgpack-python unidecode attrdict service_identity && \ + pip2 install git+git://github.com/esnme/ultrajson.git COPY dockerfiles/sshd_config /etc/ssh/sshd_config COPY dockerfiles/entrypoint.sh /sbin/entrypoint.sh From e665658eeaaedab017643929a2bc1b89f60ddd34 Mon Sep 17 00:00:00 2001 From: Guodong Wang Date: Wed, 30 Oct 2019 01:43:01 +0800 Subject: [PATCH 33/53] make splunk_eventgen as a module (#332) --- splunk_eventgen/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 splunk_eventgen/__init__.py diff --git a/splunk_eventgen/__init__.py b/splunk_eventgen/__init__.py new file mode 100644 index 00000000..e69de29b From 64d0935c4db731bc7d44ebbda534f7de886348b8 Mon Sep 17 00:00:00 2001 From: Li Wu Date: Fri, 1 Nov 2019 01:33:38 +0800 Subject: [PATCH 34/53] Fix oom caused by ujson (#336) * Fix oom caused by ujson * Fix test env --- Makefile | 4 ++-- dockerfiles/Dockerfile | 7 +++---- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index 19d32c6e..5a65cb0a 100644 --- a/Makefile +++ b/Makefile @@ -39,10 +39,10 @@ test_helper: docker cp . 
${EVENTGEN_TEST_IMAGE}:$(shell pwd) @echo 'Verifying contents of pip.conf' - docker exec -i ${EVENTGEN_TEST_IMAGE} /bin/sh -c "cd $(shell pwd); pip install dist/splunk_eventgen*.tar.gz" + docker exec -i ${EVENTGEN_TEST_IMAGE} /bin/sh -c "cd $(shell pwd); pip3 install dist/splunk_eventgen*.tar.gz" @echo 'Installing test requirements' - docker exec -i ${EVENTGEN_TEST_IMAGE} /bin/sh -c "pip install --upgrade pip;pip install -r $(shell pwd)/requirements.txt" + docker exec -i ${EVENTGEN_TEST_IMAGE} /bin/sh -c "pip3 install --upgrade pip;pip3 install -r $(shell pwd)/requirements.txt;pip3 install git+https://github.com/esnme/ultrajson.git" @echo 'Make simulated app dir and sample for modular input test' docker exec -i ${EVENTGEN_TEST_IMAGE} /bin/sh -c "cd $(shell pwd); cd ../..; mkdir -p modinput_test_app/samples/" diff --git a/dockerfiles/Dockerfile b/dockerfiles/Dockerfile index 7a99dea3..9b631aa2 100644 --- a/dockerfiles/Dockerfile +++ b/dockerfiles/Dockerfile @@ -5,7 +5,7 @@ RUN apk --no-cache upgrade && \ python3 \ python3-dev \ python2-dev \ - py2-pip \ + py2-pip \ gcc \ libc-dev \ libffi-dev \ @@ -28,10 +28,9 @@ RUN apk --no-cache upgrade && \ mkdir -p /root/.ssh && \ chmod 0700 /root/.ssh && \ passwd -u root && \ - pip3 install git+git://github.com/esnme/ultrajson.git && \ # install dependencies of conduct2 used by perf - pip2 install filelock twisted requests queuelib ujson psutil crochet msgpack-python unidecode attrdict service_identity && \ - pip2 install git+git://github.com/esnme/ultrajson.git + pip2 install filelock twisted requests queuelib ujson psutil crochet msgpack-python unidecode attrdict service_identity && \ + pip2 install git+https://github.com/esnme/ultrajson.git COPY dockerfiles/sshd_config /etc/ssh/sshd_config COPY dockerfiles/entrypoint.sh /sbin/entrypoint.sh From 1ecce274b4bfcf1b378e4590d8c6ccef5ba7817c Mon Sep 17 00:00:00 2001 From: Guodong Wang Date: Fri, 1 Nov 2019 14:52:48 +0800 Subject: [PATCH 35/53] add release automation 
script (#335) * add release automation script --- docs/CHANGELOG.md | 143 ++++++++------------- release_tool/README.md | 15 +++ release_tool/prepare_release_branch.py | 170 +++++++++++++++++++++++++ 3 files changed, 239 insertions(+), 89 deletions(-) create mode 100644 release_tool/README.md create mode 100644 release_tool/prepare_release_branch.py diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 30f02118..39bae45d 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -1,89 +1,54 @@ -6.5.0 -- Added metrics output mode -- Fixed regex token replacement issue -- Added test coverage information -- Increased functional test coverage -- Eventgen server complete revamp and standalone mode support -- Added contributor license -- Updated Dockerfile -- Added documentation -- Fixed bugs / stability / optimized speed - -6.4.0 -- Fixed exception log error -- Fixed CircleCI status badage error -- Fixed navigation error for app if installed with Splunk Stream -- Fixed generatorWorkers not working error -- Fixed interval error when end = 1 -- Fixed fileName in global stanza error -- Added 3rd party libs in SA-Eventgen App -- Added httpeventAllowFailureCount for httpevent -- Added 3rd party libs in license credit -- Disabled logging queue in multiprocess mode -- Changed implementation of extendIndex for better performance - -6.3.6 -- Added functional tests for jinja template and modular input feature -- Fixed default jinja template directory is not correctly resolved when sampleDir is set issue -- Fixed verbose flag not working in splunk_eventgen command line issue -- Fixed index, source, sourcetype are not correct when using splunkstream mode issue -- Fixed ssh to container issue -- Fixed perdayvolume without end setting error -- Updated documentation for better reading and remove unrelated part - -6.3.5 -- Added extendIndexes feature to support a list of indexes -- Fixed timer and token logic -- Changed end=-1 to continuously iterate without stopping -- Changed end=0 
to not execute -- Added a linter for code quality -- Updated docs / docs format -- Added a suite of functional tests - -6.3.4: -- Documentation cleanup -- Jinja template bugfix in app -- Implementation of 'timeMultiple’ option -- Templates for bugs/feature requests -- Fixed Jinja test configuration stanzas -- Default behavior for 'count' edge cases - -6.3.3: -- Added performance metrics compared to Eventgen 5.x -- New config option for generation-time metrics: outputCounter -- Jinja template fixes -- Timestamp parsing fix -- Output queueing fix for outputMode splunkstream -- Count rater fixes, now supports indefinite generation - -6.3.2: -- Fixed verbosity bug -- Added documentation - -6.3.1: -- Fixed Eventgen Volume APIs -- Improved Eventgen Server Logging -- Corrected Eventgen Server and Controller conf syncing issue -- Adding verbosity options (ERROR, INFO, DEBUG) to Eventgen modinput -- Implemented future event generation support in replay mode -- Fixed Jinja template's missing default values -- Adjusted logging message levels for less verbosity -- Fixed event count off by 1 issue -- Fixed unnecessary empty data generators being created -- Updated dependency list - -6.3.0: -- Bug fixes for the customer issues -- Documentation upgrade -- Code refactoring for version unification -- Logging improvements - -6.2.1: -- Fixing SA-Eventgen Dashboard and log searching -- Improving internal logging and fixing splunkd logging issue -- Fixing timestamping in default generator -- Fixing custom plugin integration -- Fixing SA-Eventgen app settings -- Supporting Eventgen 5 backward compatibility with additional features -- Better modinput process management -- Minor Bugfixes with various customer cases +**7.0.0**: + +- Check the release note and download the package/source from [Here](https://github.com/splunk/eventgen/releases/tag/7.0.0) + +**6.5.2**: + +- Check the release note and download the package/source from 
[Here](https://github.com/splunk/eventgen/releases/tag/6.5.2) + + +**6.5.1**: + +- Check the release note and download the package/source from [Here](https://github.com/splunk/eventgen/releases/tag/6.5.1) + + +**6.5.0**: + +- Check the release note and download the package/source from [Here](https://github.com/splunk/eventgen/releases/tag/6.5.0) + +**6.4.0**: + +- Check the release note and download the package/source from [Here](https://github.com/splunk/eventgen/releases/tag/6.4.0) + +**6.3.6**: + +- Check the release note and download the package/source from [Here](https://github.com/splunk/eventgen/releases/tag/6.3.6) + +**6.3.5**: + +- Check the release note and download the package/source from [Here](https://github.com/splunk/eventgen/releases/tag/6.3.5) + +**6.3.4**: + +- Check the release note and download the package/source from [Here](https://github.com/splunk/eventgen/releases/tag/6.3.4) + +**6.3.3**: + +- Check the release note and download the package/source from [Here](https://github.com/splunk/eventgen/releases/tag/6.3.3) + +**6.3.2**: + +- Check the release note and download the package/source from [Here](https://github.com/splunk/eventgen/releases/tag/6.3.2) + +**6.3.1**: + +- Check the release note and download the package/source from [Here](https://github.com/splunk/eventgen/releases/tag/6.3.1) + +**6.3.0**: + +- Check the release note and download the package/source from [Here](https://github.com/splunk/eventgen/releases/tag/6.3.0) + +**6.2.1**: + +- Check the release note and download the package/source from [Here](https://github.com/splunk/eventgen/releases/tag/6.2.1) + diff --git a/release_tool/README.md b/release_tool/README.md new file mode 100644 index 00000000..9cbecfe7 --- /dev/null +++ b/release_tool/README.md @@ -0,0 +1,15 @@ +# Release tool + +Use script to bump the release verison and create the release PR to merge to develop branch. 
+ +**Note: this script only works with python3.** + +- If you have generated your github access token, you can use the following command to bump versions and send PR automatically. + ```bash + python prepare_release_branch.py -v -n -a + ``` + +- If the access token is not given, this script only is only used to bump the release version and push the commit to remote repo. You need to go to github web page to create your PR manually. + ``` + python prepare_release_branch.py -v -n + ``` diff --git a/release_tool/prepare_release_branch.py b/release_tool/prepare_release_branch.py new file mode 100644 index 00000000..f2d9df77 --- /dev/null +++ b/release_tool/prepare_release_branch.py @@ -0,0 +1,170 @@ +import argparse +import os +import sys +import logging +import json +import re +import subprocess +import requests + +logging.getLogger().setLevel(logging.INFO) +root_repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + + +def parse_args(): + def validate_version_str(version): + v_str = str(version).strip() + if not v_str: + raise argparse.ArgumentTypeError('verison str can not be emtpy.') + err_message = 'version string should be of format "major.minor.hotfix"' + numbers = v_str.split('.') + if len(numbers) != 3: + raise argparse.ArgumentTypeError(err_message) + for n in numbers: + valid = False + try: + v = int(n) + valid = (v>=0) + except: + valid = False + if not valid: + raise argparse.ArgumentTypeError(err_message) + return v_str + + def validate_token(token): + t = token.strip() + if not t: + raise argparse.ArgumentTypeError('token can not be empty') + return t + + parser = argparse.ArgumentParser( + 'prepare_release_branch.py', + description='eventgen release branch tool.\ncreate the release branch, set the right version and push the pull request.') + parser.add_argument('-v', '--verbose', default=False, action='store_true', help='enable the verbose logging') + parser.add_argument('-n', '--version_str', type=validate_version_str, required=True) 
+ parser.add_argument('-a', '--token', help='your github access token.', default=None, type=validate_token) + return parser.parse_args(sys.argv[1:]) + +def setup_logging(verbose=None): + l = logging.DEBUG if verbose is True else logging.INFO + logging.getLogger().setLevel(l) + +def setup_env(): + ''' + by default, we use this hard code current working dir. + because curent working dir has impact about the child sh process. + we need to setup it before launching any process. + if there is concrete requirement about setting the current + working dir, we can change it to cmd arguemnt. + ''' + logging.debug(f'try to change current working directory to {root_repo_dir}') + os.chdir(root_repo_dir) + +def run_sh_cmd(args, exit_on_error=None): + should_exit_on_error = True if exit_on_error is None else exit_on_error + child = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = child.communicate() + outs = out.decode('utf-8') + errs = err.decode('utf-8') + if child.returncode == 0: + logging.debug(f'execute sh command {args} success.') + logging.debug(f'children output:\n{outs}') + return True + logging.error(f'execute sh cmd {args} fail.\nchildren output:\n{outs}\n{errs}') + if should_exit_on_error: + assert False, 'sh command fails.' 
+ return False + +def get_release_branch_name(version_str): + v = version_str.replace('.', '_') + return f'release/{v}' + +def replace_version(ver): + ver_json_file = os.path.join(root_repo_dir, 'splunk_eventgen', 'version.json') + with open(ver_json_file, 'w') as fp: + json.dump({'version': ver}, fp) + app_conf = os.path.join(root_repo_dir, 'splunk_eventgen', 'splunk_app', 'default', 'app.conf') + app_conf_content = [] + with open(app_conf, 'r') as fp: + app_conf_content = fp.readlines() + app_pattern = re.compile(r'version\s*=') + with open(app_conf, 'w') as fp: + for line in app_conf_content: + lstr = line.strip() + if app_pattern.search(lstr): + fp.write(f'version = {ver}\n') + else: + fp.write(f'{lstr}\n') + logging.info(f'verison is replaced with {ver}.') + + +def update_changelog(ver): + changelog_file = os.path.join(root_repo_dir, 'docs', 'CHANGELOG.md') + content = None + with open(changelog_file, 'r') as fp: + content = fp.readlines() + new_content = f'**{ver}**:\n\n' + f'- Check the release note and download the package/source from [Here](https://github.com/splunk/eventgen/releases/tag/{ver})\n\n' + with open(changelog_file, 'w') as fp: + fp.write(new_content) + for l in content: + fp.write(l) + logging.info('CHANGELOG.md is updated.') + + +def commit_updated_files(ver): + ver_json_file = os.path.join('splunk_eventgen', 'version.json') + app_conf = os.path.join('splunk_eventgen', 'splunk_app', 'default', 'app.conf') + changelog = os.path.join('docs', 'CHANGELOG.md') + run_sh_cmd(['git', 'add', ver_json_file]) + run_sh_cmd(['git', 'add', app_conf]) + run_sh_cmd(['git', 'add', changelog]) + run_sh_cmd(['git', 'commit', '-m', f'update eventgen version to {ver}'], False) + logging.info('committed version files.') + +def create_pr(ver, token): + release_branch = get_release_branch_name(ver) + response = requests.post( + 'https://api.github.com/repos/splunk/eventgen/pulls', + json={'title': f'Release eventgen {ver}', 'head': release_branch, 'base': 
'develop', 'body': + 'As the title'}, headers={ + 'Accept': 'application/vnd.github.full+json', + 'Content-Type': 'application/json', + 'Authorization': f'token {token}'}) + response.raise_for_status() + data = response.json() + pr_url = data['url'] + logging.info(f'Pull request is created:\n\t{pr_url}') + + +if __name__ == '__main__': + arg_values = parse_args() + if arg_values is None: + sys.exit(1) + setup_logging(arg_values.verbose) + setup_env() + + logging.info('checkout to the develop branch and pull the latest change...') + run_sh_cmd(['git', 'checkout', 'develop']) + run_sh_cmd(['git', 'pull']) + + logging.info('check out the release branch') + release_branch = get_release_branch_name(arg_values.version_str) + branch_exist = run_sh_cmd(['git','show-ref','--verify',f'refs/heads/{release_branch}'], False) + if not branch_exist: + run_sh_cmd(['git', 'checkout', '-b', release_branch]) + else: + run_sh_cmd(['git', 'checkout', release_branch]) + + replace_version(arg_values.version_str) + update_changelog(arg_values.version_str) + + commit_updated_files(arg_values.version_str) + + run_sh_cmd(['git', 'push', 'origin', release_branch]) + logging.info(f'release branch {release_branch} is pushed to remote repo.') + + if arg_values.token: + create_pr(arg_values.version_str, arg_values.token) + else: + pr_url = 'https://github.com/splunk/eventgen/compare' + logging.info('create pull reqeust manually by visiting this url:\n{pr_url}') From 34f06af9f6dc74c86b7114569e0bf2c15a56a512 Mon Sep 17 00:00:00 2001 From: Li Wu Date: Fri, 8 Nov 2019 10:56:07 +0800 Subject: [PATCH 36/53] Fix process leak and start/stop 500 issue (#344) * Fix process leak and start/stop 500 issue * Fix test case fail --- .../eventgen_core_object.py | 4 +- .../eventgen_server_api.py | 7 +- splunk_eventgen/eventgen_core.py | 66 ++++++++----------- splunk_eventgen/lib/eventgenoutput.py | 1 - splunk_eventgen/lib/eventgentimer.py | 1 + .../lib/plugins/output/syslogout.py | 1 + 6 files changed, 34 
insertions(+), 46 deletions(-) diff --git a/splunk_eventgen/eventgen_api_server/eventgen_core_object.py b/splunk_eventgen/eventgen_api_server/eventgen_core_object.py index 87c89a26..49762c36 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_core_object.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_core_object.py @@ -24,8 +24,7 @@ def check_and_configure_eventgen(self): self.logger.info("Configured Eventgen from {}".format(CUSTOM_CONFIG_PATH)) def refresh_eventgen_core_object(self): - self.eventgen_core_object.kill_processes() - self.eventgen_core_object = eventgen_core.EventGenerator(self._create_args()) + self.eventgen_core_object.stop(force_stop=True) self.configured = False self.configfile = None self.check_and_configure_eventgen() @@ -37,6 +36,7 @@ def _create_args(self): args.version = False args.backfill = None args.count = None + args.end = None args.devnull = False args.disableOutputQueue = False args.generators = None diff --git a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py index 01589aab..c882bd3c 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py @@ -164,12 +164,7 @@ def http_post_start(): @bp.route('/stop', methods=['POST']) def http_post_stop(): try: - force_stop = False - try: - force_stop = True - except: - force_stop = False - response = self.stop(force_stop = force_stop) + response = self.stop(force_stop=True) self.eventgen.refresh_eventgen_core_object() return Response(json.dumps(response), mimetype='application/json', status=200) except Exception as e: diff --git a/splunk_eventgen/eventgen_core.py b/splunk_eventgen/eventgen_core.py index b3eff4d0..48a7aadf 100644 --- a/splunk_eventgen/eventgen_core.py +++ b/splunk_eventgen/eventgen_core.py @@ -8,7 +8,7 @@ import time from queue import Empty, Queue import signal -from threading import Thread +from threading import Thread, 
Event import multiprocessing from splunk_eventgen.lib.eventgenconfig import Config @@ -32,13 +32,14 @@ def __init__(self, args=None): localized .conf entries. :param args: __main__ parse_args() object. ''' - self.stopping = False + self.stop_request = Event() self.force_stop = False self.started = False self.completed = False self.config = None self.args = args - + self.workerPool = [] + self.manager = None self._setup_loggers(args=args) # attach to the logging queue self.logger.info("Logging Setup Complete.") @@ -94,9 +95,6 @@ def _load_config(self, configfile, **kwargs): else: generator_worker_count = self.config.generatorWorkers - # TODO: Probably should destroy pools better so processes are cleaned. - if self.args.multiprocess: - self.kill_processes() self._setup_pools(generator_worker_count) def _reload_plugins(self): @@ -192,7 +190,7 @@ def _create_generator_pool(self, workercount=20): ''' if self.args.multiprocess: self.manager = multiprocessing.Manager() - if self.config.disableLoggingQueue: + if self.config and self.config.disableLoggingQueue: self.loggingQueue = None else: # TODO crash caused by logging Thread https://github.com/splunk/eventgen/issues/217 @@ -236,6 +234,7 @@ def _create_generator_workers(self, workercount=20): )) self.workerPool.append(process) process.start() + self.logger.info("create process: {}".format(process.pid)) else: pass @@ -252,7 +251,7 @@ def _setup_loggers(self, args=None): self.logger.setLevel(logging.ERROR) def _worker_do_work(self, work_queue, logging_queue): - while not self.stopping: + while not self.stop_request.isSet(): try: item = work_queue.get(timeout=10) startTime = time.time() @@ -271,7 +270,7 @@ def _worker_do_work(self, work_queue, logging_queue): raise e def _generator_do_work(self, work_queue, logging_queue, output_counter=None): - while not self.stopping: + while not self.stop_request.isSet(): try: item = work_queue.get(timeout=10) startTime = time.time() @@ -326,7 +325,7 @@ def 
_proc_worker_do_work(work_queue, logging_queue, config, disable_logging): sys.exit(0) def logger_thread(self, loggingQueue): - while not self.stopping: + while not self.stop_request.isSet(): try: record = loggingQueue.get(timeout=10) logger.handle(record) @@ -420,7 +419,7 @@ def _initializePlugins(self, dirname, plugins, plugintype, name=None): return ret def start(self, join_after_start=True): - self.stopping = False + self.stop_request.clear() self.started = True self.config.stopping = False self.completed = False @@ -460,23 +459,19 @@ def join_process(self): raise e def stop(self, force_stop=False): - # empty the sample queue: - self.config.stopping = True - self.stopping = True + if hasattr(self.config, "stopping"): + self.config.stopping = True self.force_stop = force_stop + # set the thread event to stop threads + self.stop_request.set() - self.logger.info("All timers exited, joining generation queue until it's empty.") - if force_stop: - self.logger.info("Forcibly stopping Eventgen: Deleting workerQueue.") - del self.workerQueue - self._create_generator_pool() - self.workerQueue.join() # if we're in multiprocess, make sure we don't add more generators after the timers stopped. if self.args.multiprocess: if force_stop: self.kill_processes() else: - self.genconfig["stopping"] = True + if hasattr(self, "genconfig"): + self.genconfig["stopping"] = True for worker in self.workerPool: count = 0 # We wait for a minute until terminating the worker @@ -490,12 +485,9 @@ def stop(self, force_stop=False): time.sleep(2) count += 1 - self.logger.info("All generators working/exited, joining output queue until it's empty.") - if not self.args.multiprocess and not force_stop: - self.outputQueue.join() - self.logger.info("All items fully processed. 
Cleaning up internal processes.") self.started = False - self.stopping = False + # clear the thread event + self.stop_request.clear() def reload_conf(self, configfile): ''' @@ -540,14 +532,14 @@ def check_done(self): return self.sampleQueue.empty() and self.sampleQueue.unfinished_tasks <= 0 and self.workerQueue.empty() def kill_processes(self): - try: - if self.args.multiprocess: - for worker in self.workerPool: - try: - os.kill(int(worker.pid), signal.SIGKILL) - except: - continue - del self.outputQueue - self.manager.shutdown() - except: - pass + self.logger.info("Kill worker processes") + for worker in self.workerPool: + try: + self.logger.info("Kill worker process: {}".format(worker.pid)) + os.kill(int(worker.pid), signal.SIGKILL) + except Exception as e: + self.logger.ERROR(str(e)) + continue + self.workerPool = [] + if self.manager: + self.manager.shutdown() diff --git a/splunk_eventgen/lib/eventgenoutput.py b/splunk_eventgen/lib/eventgenoutput.py index b60e4f9a..4399989c 100644 --- a/splunk_eventgen/lib/eventgenoutput.py +++ b/splunk_eventgen/lib/eventgenoutput.py @@ -99,4 +99,3 @@ def flush(self, endOfInterval=False): tmp = None outputer.run() q = None - diff --git a/splunk_eventgen/lib/eventgentimer.py b/splunk_eventgen/lib/eventgentimer.py index 90af68b4..d374ee81 100644 --- a/splunk_eventgen/lib/eventgentimer.py +++ b/splunk_eventgen/lib/eventgentimer.py @@ -108,6 +108,7 @@ def real_run(self): # referenced in the config object, while, self.stopping will only stop this one. if self.config.stopping or self.stopping: end = True + continue count = self.rater.rate() # First run of the generator, see if we have any backfill work to do. 
if self.countdown <= 0: diff --git a/splunk_eventgen/lib/plugins/output/syslogout.py b/splunk_eventgen/lib/plugins/output/syslogout.py index 31267aa9..e63b269a 100644 --- a/splunk_eventgen/lib/plugins/output/syslogout.py +++ b/splunk_eventgen/lib/plugins/output/syslogout.py @@ -17,6 +17,7 @@ def filter(self, record): record.host = self.host return True + class SyslogOutOutputPlugin(OutputPlugin): useOutputQueue = True name = 'syslogout' From a9e56005dd1242baaf30511bce68eb92d5e90e41 Mon Sep 17 00:00:00 2001 From: Li Wu Date: Sat, 9 Nov 2019 05:32:54 +0800 Subject: [PATCH 37/53] Fix high system load issue (#345) --- splunk_eventgen/lib/eventgentoken.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/splunk_eventgen/lib/eventgentoken.py b/splunk_eventgen/lib/eventgentoken.py index e3351e05..433602e7 100644 --- a/splunk_eventgen/lib/eventgentoken.py +++ b/splunk_eventgen/lib/eventgentoken.py @@ -349,7 +349,7 @@ def _getReplacement(self, old=None, earliestTime=None, latestTime=None, s=None, except: logger.error("Could not parse json for '%s' in sample '%s'" % (listMatch.group(1), s.name)) return old - return random.SystemRandom().choice(value) + return random.choice(value) else: logger.error("Unknown replacement value '%s' for replacementType '%s'; will not replace" % From 61938251f0d1e239c10b40338419af1f55625ce9 Mon Sep 17 00:00:00 2001 From: Tony Lee Date: Mon, 11 Nov 2019 10:31:41 -0800 Subject: [PATCH 38/53] Adding an option (#340) * Adding an option * Adding ujson2 for python3 as well * minor improvements --- dockerfiles/Dockerfile | 4 +++- splunk_eventgen/eventgen_api_server/eventgen_server_api.py | 4 ++-- splunk_eventgen/eventgen_core.py | 3 ++- .../lib/plugins/generator/perdayvolumegenerator.py | 2 +- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/dockerfiles/Dockerfile b/dockerfiles/Dockerfile index 9b631aa2..4a60899b 100644 --- a/dockerfiles/Dockerfile +++ b/dockerfiles/Dockerfile @@ -36,7 +36,9 @@ COPY dockerfiles/sshd_config 
/etc/ssh/sshd_config COPY dockerfiles/entrypoint.sh /sbin/entrypoint.sh COPY dist/* /root/splunk_eventgen.tgz RUN pip3 install /root/splunk_eventgen.tgz && \ - rm /root/splunk_eventgen.tgz + rm /root/splunk_eventgen.tgz && \ + pip3 uninstall ujson -y && \ + pip3 install git+https://github.com/esnme/ultrajson.git EXPOSE 2222 6379 9500 RUN chmod a+x /sbin/entrypoint.sh diff --git a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py index c882bd3c..eab43b1d 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_server_api.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_server_api.py @@ -545,7 +545,7 @@ def setup_http(self, data): del kv_pair['httpeventServers'] conf_dict['global']['threading'] = 'process' conf_dict['global']['httpeventMaxPayloadSize'] = '256000' - conf_dict['global']['outputMode'] = 'httpevent' + conf_dict['global']['outputMode'] = data.get("outputMode") if data.get("outputMode") else 'httpevent' conf_dict['global']['httpeventServers'] = {"servers": data.get("servers")} self.set_conf(conf_dict) else: @@ -615,6 +615,6 @@ def create_new_hec_key(hostname): del kv_pair['httpeventServers'] conf_dict['global']['threading'] = 'process' conf_dict['global']['httpeventMaxPayloadSize'] = '256000' - conf_dict['global']['outputMode'] = 'httpevent' + conf_dict['global']['outputMode'] = data.get("outputMode") if data.get("outputMode") else 'httpevent' conf_dict['global']['httpeventServers'] = {"servers": self.discovered_servers} self.set_conf(conf_dict) diff --git a/splunk_eventgen/eventgen_core.py b/splunk_eventgen/eventgen_core.py index 48a7aadf..48721951 100644 --- a/splunk_eventgen/eventgen_core.py +++ b/splunk_eventgen/eventgen_core.py @@ -313,8 +313,9 @@ def _proc_worker_do_work(work_queue, logging_queue, config, disable_logging): item._out.updateConfig(item.config) item.run() work_queue.task_done() + item.logger.info("Current Worker Stopping: {0}".format(stopping)) + item.logger 
= None stopping = genconfig['stopping'] - item.logger.debug("Current Worker Stopping: {0}".format(stopping)) except Empty: stopping = genconfig['stopping'] except Exception as e: diff --git a/splunk_eventgen/lib/plugins/generator/perdayvolumegenerator.py b/splunk_eventgen/lib/plugins/generator/perdayvolumegenerator.py index 8a7fdd5e..10bc8159 100644 --- a/splunk_eventgen/lib/plugins/generator/perdayvolumegenerator.py +++ b/splunk_eventgen/lib/plugins/generator/perdayvolumegenerator.py @@ -80,7 +80,7 @@ def gen(self, count, earliest, latest, samplename=None): (self._sample.name, self._sample.app, len(eventsDict))) # build the events and replace tokens - GeneratorPlugin.build_events(self, eventsDict, startTime, earliest, latest) + self.build_events(eventsDict, startTime, earliest, latest) def load(): From 9ffbcbc15e4e33697cc6f7ad8d4a1840d73219f1 Mon Sep 17 00:00:00 2001 From: Tony Lee Date: Tue, 12 Nov 2019 11:20:16 -0800 Subject: [PATCH 39/53] Scp plugin (#343) * intermediate code for scp_plugin * code clean up * Added doc * revert windbag * Cap * added test --- docs/REFERENCE.md | 23 ++- .../lib/plugins/generator/replay.py | 2 +- .../lib/plugins/output/httpevent_core.py | 2 +- splunk_eventgen/lib/plugins/output/scpout.py | 154 ++++++++++++++++++ tests/medium/plugins/test_scp_output.py | 32 ++++ .../medium_test/eventgen.conf.scpoutput | 14 ++ .../sample_eventgen_conf/scpout/eventgen.conf | 16 ++ .../scpout/splunk_cloud_platform_events.txt | 5 + .../windbag/eventgen.conf.windbag | 10 +- 9 files changed, 251 insertions(+), 7 deletions(-) create mode 100644 splunk_eventgen/lib/plugins/output/scpout.py create mode 100644 tests/medium/plugins/test_scp_output.py create mode 100644 tests/sample_eventgen_conf/medium_test/eventgen.conf.scpoutput create mode 100644 tests/sample_eventgen_conf/scpout/eventgen.conf create mode 100644 tests/sample_eventgen_conf/scpout/splunk_cloud_platform_events.txt diff --git a/docs/REFERENCE.md b/docs/REFERENCE.md index 2c9c8a76..ccd77a81 
100644 --- a/docs/REFERENCE.md +++ b/docs/REFERENCE.md @@ -121,8 +121,9 @@ outputWorkers = * Generally if using TCP based outputs like splunkstream, more could be required * Defaults to 1 -outputMode = modinput | s2s | file | splunkstream | stdout | devnull | spool | httpevent | syslogout | tcpout | udpout | metric_httpevent +outputMode = scpout | modinput | s2s | file | splunkstream | stdout | devnull | spool | httpevent | syslogout | tcpout | udpout | metric_httpevent * Specifies how to output log data. Modinput is default. + * If setting scpout, should set scpEndPoint and scpAccessToken. scpClientId, scpClientSecret, and scpRetryNum are optional. * If setting spool, should set spoolDir * If setting file, should set fileName * If setting splunkstream, should set splunkHost, splunkPort, splunkMethod, @@ -132,6 +133,26 @@ outputMode = modinput | s2s | file | splunkstream | stdout | devnull | spool | h * If setting httpevent, should set httpeventServers * If setting metric_httpevent, should set httpeventServers and make sure your index is a splunk metric index +scpEndPoint = + * Should be a full url to the scp endpoint + +scpAccessToken = + * Should be a scp access token. Do not include "Bearer". 
+ +scpClientId = + * Optional + * SCP client id that is used to renew the access token if it expires during the data generation + * If not supplied, will not renew the access token and data transmission might fail + +scpClientSecret = + * Optional + * SCP client secret that is used to renew the access token if it expires during the data generation + * If not supplied, will not renew the access token and data transmission might fail + +scpRetryNum = + * Optional and defaults to 0 + * Retry a failing data transmission batch + syslogDestinationHost = * Defaults to 127.0.0.1 diff --git a/splunk_eventgen/lib/plugins/generator/replay.py b/splunk_eventgen/lib/plugins/generator/replay.py index 7cb7092d..dc80b6d3 100644 --- a/splunk_eventgen/lib/plugins/generator/replay.py +++ b/splunk_eventgen/lib/plugins/generator/replay.py @@ -112,7 +112,7 @@ def gen(self, count, earliest, latest, samplename=None): continue # Refer to the last event to calculate the new backfill time - time_difference = datetime.timedelta(seconds=(current_event_timestamp - previous_event_timestamp) .total_seconds() * self._sample.timeMultiple) + time_difference = datetime.timedelta(seconds=(current_event_timestamp - previous_event_timestamp).total_seconds() * self._sample.timeMultiple) if self.backfill_time + time_difference >= self.current_time: sleep_time = time_difference - (self.current_time - self.backfill_time) diff --git a/splunk_eventgen/lib/plugins/output/httpevent_core.py b/splunk_eventgen/lib/plugins/output/httpevent_core.py index e54fb869..bd47abc7 100644 --- a/splunk_eventgen/lib/plugins/output/httpevent_core.py +++ b/splunk_eventgen/lib/plugins/output/httpevent_core.py @@ -176,7 +176,7 @@ def _sendHTTPEvents(self, payload): try: self._transmitEvents(stringpayload) totalbytessent += len(stringpayload) - currentreadsize = 0 + currentreadsize = targetlinesize stringpayload = targetline except Exception as e: logger.exception(str(e)) diff --git a/splunk_eventgen/lib/plugins/output/scpout.py 
b/splunk_eventgen/lib/plugins/output/scpout.py new file mode 100644 index 00000000..723d157c --- /dev/null +++ b/splunk_eventgen/lib/plugins/output/scpout.py @@ -0,0 +1,154 @@ +from splunk_eventgen.lib.outputplugin import OutputPlugin +from splunk_eventgen.lib.logging_config import logger + +import logging +import requests +import time +import sys +import os + +import requests +from requests import Session +from requests_futures.sessions import FuturesSession +from concurrent.futures import ThreadPoolExecutor + +try: + import ujson as json +except: + import json + +class NoSCPEndPoint(Exception): + def __init__(self, *args, **kwargs): + Exception.__init__(self, *args, **kwargs) + +class NoSCPAccessToken(Exception): + def __init__(self, *args, **kwargs): + Exception.__init__(self, *args, **kwargs) + +class SCPOutputPlugin(OutputPlugin): + useOutputQueue = False + name = 'scpout' + MAXQUEUELENGTH = 1000 + + def __init__(self, sample, output_counter=None): + OutputPlugin.__init__(self, sample, output_counter) + + self.scpHttpPayloadMax = 150000 # Documentation recommends 20KB to 200KB. Going with 150KB. + self.scpEndPoint = getattr(self._sample, "scpEndPoint", None) + self.scpAccessToken = getattr(self._sample, "scpAccessToken", None) + self.scpClientId = getattr(self._sample, 'scpClientId', '') + self.scpClientSecret = getattr(self._sample, 'scpClientSecret', '') + self.scpRetryNum = int(getattr(self._sample, 'scpRetryNum', 0)) # By default, retry num is 0 + + if not self.scpEndPoint: + raise NoSCPEndPoint("Please specify your REST endpoint for the SCP tenant") + + if not self.scpAccessToken: + raise NoSCPAccessToken("Please specify your REST endpoint access token for the SCP tenant") + + if self.scpClientId and self.scpClientSecret: + logger.info("Both scpClientId and scpClientSecret are supplied. 
We will renew the expired token using these credentials.") + self.scpRenewToken = True + else: + self.scpRenewToken = False + + self.header = { + "Authorization": f"Bearer {self.scpAccessToken}", + "Content-Type": "application/json" + } + + self.accessTokenExpired = False + self.tokenRenewEndPoint = "https://auth.scp.splunk.com/token" + self.tokenRenewBody = { + "client_id": self.scpClientId, + "client_secret": self.scpClientSecret, + "grant_type": "client_credentials" + } + + self._setup_REST_workers() + + def _setup_REST_workers(self, session=None, workers=10): + # disable any "requests" warnings + requests.packages.urllib3.disable_warnings() + # Bind passed in samples to the outputter. + if not session: + session = Session() + self.session = FuturesSession(session=session, executor=ThreadPoolExecutor(max_workers=workers)) + self.active_sessions = [] + + def flush(self, events): + for i in range(self.scpRetryNum + 1): + logger.debug(f"Sending data to the scp endpoint. Num:{i}") + self._sendHTTPEvents(events) + + if not self.checkResults(): + if self.accessTokenExpired and self.scpRenewToken: + self.renewAccessToken() + self.active_sessions = [] + else: + break + + def checkResults(self): + for session in self.active_sessions: + response = session.result() + if response.status_code == 401 and "Invalid or Expired Bearer Token" in response.text: + logger.error("scpAccessToken is invalid or expired") + self.accessTokenExpired = True + return False + elif response.status_code != 200: + logger.error(f"Data transmisison failed with {response.status_code} and {response.text}") + return False + logger.debug(f"Data transmission successful") + return True + + def renewAccessToken(self): + response = requests.post(self.tokenRenewEndPoint, data=self.tokenRenewBody, timeout=5) + if response.status_code == 200: + logger.info("Renewal of the access token succesful") + self.scpAccessToken = response.json()["access_token"] + setattr(self._sample, "scpAccessToken", 
self.scpAccessToken) + self.accessTokenExpired = False + else: + logger.error("Renewal of the access token failed") + + def _sendHTTPEvents(self, events): + currentPayloadSize = 0 + currentPayload = [] + try: + for event in events: + # Reformat the event to fit the scp request spec + # TODO: Move this logic to generator + try: + event["body"] = event.pop("_raw") + event["timestamp"] = int(event.pop("_time") * 1000) + event.pop("index") + if "attributes" not in event: + event["attributes"] = {} + event["attributes"]["hostRegex"] = event.pop("hostRegex") + except: + pass + + targetline = json.dumps(event) + targetlinesize = len(targetline) + + # Continue building a current payload if the payload is less than the max size + if (currentPayloadSize + targetlinesize) < self.scpHttpPayloadMax: + currentPayload.append(event) + currentPayloadSize += targetlinesize + else: + self.active_sessions.append(self.session.post(url=self.scpEndPoint, data=json.dumps(currentPayload), headers=self.header, verify=False)) + currentPayloadSize = targetlinesize + currentPayload = [event] + + # Final flush of the leftover events + if currentPayloadSize > 0: + self.active_sessions.append(self.session.post(url=self.scpEndPoint, data=json.dumps(currentPayload), headers=self.header, verify=False)) + + except Exception as e: + logger.exception(str(e)) + raise e + + +def load(): + """Returns an instance of the plugin""" + return SCPOutputPlugin \ No newline at end of file diff --git a/tests/medium/plugins/test_scp_output.py b/tests/medium/plugins/test_scp_output.py new file mode 100644 index 00000000..9540bff6 --- /dev/null +++ b/tests/medium/plugins/test_scp_output.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python3 +# encoding: utf-8 + +import os +import sys +import requests + +from mock import MagicMock, patch + +from splunk_eventgen.__main__ import parse_args +from splunk_eventgen.eventgen_core import EventGenerator +from splunk_eventgen.lib.plugins.output.scpout import SCPOutputPlugin + +FILE_DIR = 
os.path.dirname(os.path.abspath(__file__)) + + +class TestSCPOutputPlugin(object): + def test_output_data_to_scp(self): + configfile = "tests/sample_eventgen_conf/medium_test/eventgen.conf.scpoutput" + testargs = ["eventgen", "generate", configfile] + with patch.object(sys, 'argv', testargs): + pargs = parse_args() + assert pargs.subcommand == 'generate' + assert pargs.configfile == configfile + eventgen = EventGenerator(args=pargs) + with patch('requests_futures.sessions.FuturesSession.post') as mock_requests: + sample = MagicMock() + scpoutput = SCPOutputPlugin(sample) + + eventgen.start() + scpoutput.session.post.assert_called() + assert scpoutput.session.post.call_count == 1 diff --git a/tests/sample_eventgen_conf/medium_test/eventgen.conf.scpoutput b/tests/sample_eventgen_conf/medium_test/eventgen.conf.scpoutput new file mode 100644 index 00000000..6a5d08a1 --- /dev/null +++ b/tests/sample_eventgen_conf/medium_test/eventgen.conf.scpoutput @@ -0,0 +1,14 @@ +[windbag] +generator = windbag +earliest = -3s +latest = now +interval = 3 +count = 5 +end = 1 +outputMode = scpout +host = eventgen_scp_plugin +source = scp_plugin_test +sourcetype = scp_plugin_test_type + +scpEndPoint = http://127.0.0.1 +scpAccessToken = testToken diff --git a/tests/sample_eventgen_conf/scpout/eventgen.conf b/tests/sample_eventgen_conf/scpout/eventgen.conf new file mode 100644 index 00000000..44e069f7 --- /dev/null +++ b/tests/sample_eventgen_conf/scpout/eventgen.conf @@ -0,0 +1,16 @@ +[splunk_cloud_platform_events.txt] +sampleDir = . 
+interval = 1 +mode = replay +end = 1 +outputMode = scpout +host = eventgen_scp_plugin +source = scp_plugin_test +sourcetype = scp_plugin_test_type + +scpEndPoint = +scpAccessToken = + +token.0.token = \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} +token.0.replacementType = replaytimestamp +token.0.replacement = %Y-%m-%d %H:%M:%S \ No newline at end of file diff --git a/tests/sample_eventgen_conf/scpout/splunk_cloud_platform_events.txt b/tests/sample_eventgen_conf/scpout/splunk_cloud_platform_events.txt new file mode 100644 index 00000000..5a3a8f05 --- /dev/null +++ b/tests/sample_eventgen_conf/scpout/splunk_cloud_platform_events.txt @@ -0,0 +1,5 @@ +2014-01-04 20:00:00 Event1 happened +2014-01-04 20:00:01 Event2 happened +2014-01-04 20:00:03 Event3 happened +2014-01-04 20:00:05 Event4 happened user bought @@item +2014-01-04 20:00:10 Event5 happened @@item \ No newline at end of file diff --git a/tests/sample_eventgen_conf/windbag/eventgen.conf.windbag b/tests/sample_eventgen_conf/windbag/eventgen.conf.windbag index b6e4292e..1a7a8c98 100644 --- a/tests/sample_eventgen_conf/windbag/eventgen.conf.windbag +++ b/tests/sample_eventgen_conf/windbag/eventgen.conf.windbag @@ -1,8 +1,10 @@ [windbag] generator = windbag -earliest = -3s -latest = now interval = 3 -count = 10 +count = 5 end = 3 -outputMode = stdout \ No newline at end of file +outputMode = stdout + +token.0.token = \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} +token.0.replacementType = replaytimestamp +token.0.replacement = %Y-%m-%d %H:%M:%S \ No newline at end of file From 1b695d8df9c962fb6ec9c43aff2c079645a23e50 Mon Sep 17 00:00:00 2001 From: Tony Lee Date: Tue, 12 Nov 2019 18:59:26 -0800 Subject: [PATCH 40/53] revert (#347) --- dockerfiles/Dockerfile | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/dockerfiles/Dockerfile b/dockerfiles/Dockerfile index 4a60899b..9b631aa2 100644 --- a/dockerfiles/Dockerfile +++ b/dockerfiles/Dockerfile @@ -36,9 +36,7 @@ COPY dockerfiles/sshd_config /etc/ssh/sshd_config COPY 
dockerfiles/entrypoint.sh /sbin/entrypoint.sh COPY dist/* /root/splunk_eventgen.tgz RUN pip3 install /root/splunk_eventgen.tgz && \ - rm /root/splunk_eventgen.tgz && \ - pip3 uninstall ujson -y && \ - pip3 install git+https://github.com/esnme/ultrajson.git + rm /root/splunk_eventgen.tgz EXPOSE 2222 6379 9500 RUN chmod a+x /sbin/entrypoint.sh From fbe8b257cd91ba422ee694c99d5e8c3e734928f0 Mon Sep 17 00:00:00 2001 From: Tony Lee Date: Wed, 13 Nov 2019 18:03:11 -0800 Subject: [PATCH 41/53] changing name (#349) --- docs/REFERENCE.md | 22 +++---- .../plugins/output/{scpout.py => scsout.py} | 64 +++++++++---------- ...{test_scp_output.py => test_scs_output.py} | 14 ++-- .../medium_test/eventgen.conf.scpoutput | 14 ---- .../medium_test/eventgen.conf.scsoutput | 14 ++++ .../{scpout => scsout}/eventgen.conf | 12 ++-- .../splunk_cloud_platform_events.txt | 0 7 files changed, 70 insertions(+), 70 deletions(-) rename splunk_eventgen/lib/plugins/output/{scpout.py => scsout.py} (71%) rename tests/medium/plugins/{test_scp_output.py => test_scs_output.py} (71%) delete mode 100644 tests/sample_eventgen_conf/medium_test/eventgen.conf.scpoutput create mode 100644 tests/sample_eventgen_conf/medium_test/eventgen.conf.scsoutput rename tests/sample_eventgen_conf/{scpout => scsout}/eventgen.conf (59%) rename tests/sample_eventgen_conf/{scpout => scsout}/splunk_cloud_platform_events.txt (100%) diff --git a/docs/REFERENCE.md b/docs/REFERENCE.md index ccd77a81..4bdcda6e 100644 --- a/docs/REFERENCE.md +++ b/docs/REFERENCE.md @@ -121,9 +121,9 @@ outputWorkers = * Generally if using TCP based outputs like splunkstream, more could be required * Defaults to 1 -outputMode = scpout | modinput | s2s | file | splunkstream | stdout | devnull | spool | httpevent | syslogout | tcpout | udpout | metric_httpevent +outputMode = scsout | modinput | s2s | file | splunkstream | stdout | devnull | spool | httpevent | syslogout | tcpout | udpout | metric_httpevent * Specifies how to output log data. 
Modinput is default. - * If setting scpout, should set scpEndPoint and scpAccessToken. scpClientId, scpClientSecret, and scpRetryNum are optional. + * If setting scsout, should set scsEndPoint and scsAccessToken. scsClientId, scsClientSecret, and scsRetryNum are optional. * If setting spool, should set spoolDir * If setting file, should set fileName * If setting splunkstream, should set splunkHost, splunkPort, splunkMethod, @@ -133,23 +133,23 @@ outputMode = scpout | modinput | s2s | file | splunkstream | stdout | devnull | * If setting httpevent, should set httpeventServers * If setting metric_httpevent, should set httpeventServers and make sure your index is a splunk metric index -scpEndPoint = - * Should be a full url to the scp endpoint +scsEndPoint = + * Should be a full url to the scs endpoint -scpAccessToken = - * Should be a scp access token. Do not include "Bearer". +scsAccessToken = + * Should be a scs access token. Do not include "Bearer". -scpClientId = +scsClientId = * Optional - * SCP client id that is used to renew the access token if it expires during the data generation + * SCS client id that is used to renew the access token if it expires during the data generation * If not supplied, will not renew the access token and data transmission might fail -scpClientSecret = +scsClientSecret = * Optional - * SCP client secret that is used to renew the access token if it expires during the data generation + * SCS client secret that is used to renew the access token if it expires during the data generation * If not supplied, will not renew the access token and data transmission might fail -scpRetryNum = +scsRetryNum = * Optional and defaults to 0 * Retry a failing data transmission batch diff --git a/splunk_eventgen/lib/plugins/output/scpout.py b/splunk_eventgen/lib/plugins/output/scsout.py similarity index 71% rename from splunk_eventgen/lib/plugins/output/scpout.py rename to splunk_eventgen/lib/plugins/output/scsout.py index 723d157c..8adaa853 100644 --- 
a/splunk_eventgen/lib/plugins/output/scpout.py +++ b/splunk_eventgen/lib/plugins/output/scsout.py @@ -17,51 +17,51 @@ except: import json -class NoSCPEndPoint(Exception): +class NoSCSEndPoint(Exception): def __init__(self, *args, **kwargs): Exception.__init__(self, *args, **kwargs) -class NoSCPAccessToken(Exception): +class NoSCSAccessToken(Exception): def __init__(self, *args, **kwargs): Exception.__init__(self, *args, **kwargs) -class SCPOutputPlugin(OutputPlugin): +class SCSOutputPlugin(OutputPlugin): useOutputQueue = False - name = 'scpout' + name = 'scsout' MAXQUEUELENGTH = 1000 def __init__(self, sample, output_counter=None): OutputPlugin.__init__(self, sample, output_counter) - self.scpHttpPayloadMax = 150000 # Documentation recommends 20KB to 200KB. Going with 150KB. - self.scpEndPoint = getattr(self._sample, "scpEndPoint", None) - self.scpAccessToken = getattr(self._sample, "scpAccessToken", None) - self.scpClientId = getattr(self._sample, 'scpClientId', '') - self.scpClientSecret = getattr(self._sample, 'scpClientSecret', '') - self.scpRetryNum = int(getattr(self._sample, 'scpRetryNum', 0)) # By default, retry num is 0 + self.scsHttpPayloadMax = 150000 # Documentation recommends 20KB to 200KB. Going with 150KB. 
+ self.scsEndPoint = getattr(self._sample, "scsEndPoint", None) + self.scsAccessToken = getattr(self._sample, "scsAccessToken", None) + self.scsClientId = getattr(self._sample, 'scsClientId', '') + self.scsClientSecret = getattr(self._sample, 'scsClientSecret', '') + self.scsRetryNum = int(getattr(self._sample, 'scsRetryNum', 0)) # By default, retry num is 0 - if not self.scpEndPoint: - raise NoSCPEndPoint("Please specify your REST endpoint for the SCP tenant") + if not self.scsEndPoint: + raise NoSCSEndPoint("Please specify your REST endpoint for the SCS tenant") - if not self.scpAccessToken: - raise NoSCPAccessToken("Please specify your REST endpoint access token for the SCP tenant") + if not self.scsAccessToken: + raise NoSCSAccessToken("Please specify your REST endpoint access token for the SCS tenant") - if self.scpClientId and self.scpClientSecret: - logger.info("Both scpClientId and scpClientSecret are supplied. We will renew the expired token using these credentials.") - self.scpRenewToken = True + if self.scsClientId and self.scsClientSecret: + logger.info("Both scsClientId and scsClientSecret are supplied. We will renew the expired token using these credentials.") + self.scsRenewToken = True else: - self.scpRenewToken = False + self.scsRenewToken = False self.header = { - "Authorization": f"Bearer {self.scpAccessToken}", + "Authorization": f"Bearer {self.scsAccessToken}", "Content-Type": "application/json" } self.accessTokenExpired = False self.tokenRenewEndPoint = "https://auth.scp.splunk.com/token" self.tokenRenewBody = { - "client_id": self.scpClientId, - "client_secret": self.scpClientSecret, + "client_id": self.scsClientId, + "client_secret": self.scsClientSecret, "grant_type": "client_credentials" } @@ -77,12 +77,12 @@ def _setup_REST_workers(self, session=None, workers=10): self.active_sessions = [] def flush(self, events): - for i in range(self.scpRetryNum + 1): - logger.debug(f"Sending data to the scp endpoint. 
Num:{i}") + for i in range(self.scsRetryNum + 1): + logger.debug(f"Sending data to the scs endpoint. Num:{i}") self._sendHTTPEvents(events) if not self.checkResults(): - if self.accessTokenExpired and self.scpRenewToken: + if self.accessTokenExpired and self.scsRenewToken: self.renewAccessToken() self.active_sessions = [] else: @@ -92,7 +92,7 @@ def checkResults(self): for session in self.active_sessions: response = session.result() if response.status_code == 401 and "Invalid or Expired Bearer Token" in response.text: - logger.error("scpAccessToken is invalid or expired") + logger.error("scsAccessToken is invalid or expired") self.accessTokenExpired = True return False elif response.status_code != 200: @@ -105,8 +105,8 @@ def renewAccessToken(self): response = requests.post(self.tokenRenewEndPoint, data=self.tokenRenewBody, timeout=5) if response.status_code == 200: logger.info("Renewal of the access token succesful") - self.scpAccessToken = response.json()["access_token"] - setattr(self._sample, "scpAccessToken", self.scpAccessToken) + self.scsAccessToken = response.json()["access_token"] + setattr(self._sample, "scsAccessToken", self.scsAccessToken) self.accessTokenExpired = False else: logger.error("Renewal of the access token failed") @@ -116,7 +116,7 @@ def _sendHTTPEvents(self, events): currentPayload = [] try: for event in events: - # Reformat the event to fit the scp request spec + # Reformat the event to fit the scs request spec # TODO: Move this logic to generator try: event["body"] = event.pop("_raw") @@ -132,17 +132,17 @@ def _sendHTTPEvents(self, events): targetlinesize = len(targetline) # Continue building a current payload if the payload is less than the max size - if (currentPayloadSize + targetlinesize) < self.scpHttpPayloadMax: + if (currentPayloadSize + targetlinesize) < self.scsHttpPayloadMax: currentPayload.append(event) currentPayloadSize += targetlinesize else: - self.active_sessions.append(self.session.post(url=self.scpEndPoint, 
data=json.dumps(currentPayload), headers=self.header, verify=False)) + self.active_sessions.append(self.session.post(url=self.scsEndPoint, data=json.dumps(currentPayload), headers=self.header, verify=False)) currentPayloadSize = targetlinesize currentPayload = [event] # Final flush of the leftover events if currentPayloadSize > 0: - self.active_sessions.append(self.session.post(url=self.scpEndPoint, data=json.dumps(currentPayload), headers=self.header, verify=False)) + self.active_sessions.append(self.session.post(url=self.scsEndPoint, data=json.dumps(currentPayload), headers=self.header, verify=False)) except Exception as e: logger.exception(str(e)) @@ -151,4 +151,4 @@ def _sendHTTPEvents(self, events): def load(): """Returns an instance of the plugin""" - return SCPOutputPlugin \ No newline at end of file + return SCSOutputPlugin \ No newline at end of file diff --git a/tests/medium/plugins/test_scp_output.py b/tests/medium/plugins/test_scs_output.py similarity index 71% rename from tests/medium/plugins/test_scp_output.py rename to tests/medium/plugins/test_scs_output.py index 9540bff6..46d899cc 100644 --- a/tests/medium/plugins/test_scp_output.py +++ b/tests/medium/plugins/test_scs_output.py @@ -9,14 +9,14 @@ from splunk_eventgen.__main__ import parse_args from splunk_eventgen.eventgen_core import EventGenerator -from splunk_eventgen.lib.plugins.output.scpout import SCPOutputPlugin +from splunk_eventgen.lib.plugins.output.scsout import SCSOutputPlugin FILE_DIR = os.path.dirname(os.path.abspath(__file__)) -class TestSCPOutputPlugin(object): - def test_output_data_to_scp(self): - configfile = "tests/sample_eventgen_conf/medium_test/eventgen.conf.scpoutput" +class TestSCSOutputPlugin(object): + def test_output_data_to_scs(self): + configfile = "tests/sample_eventgen_conf/medium_test/eventgen.conf.scsoutput" testargs = ["eventgen", "generate", configfile] with patch.object(sys, 'argv', testargs): pargs = parse_args() @@ -25,8 +25,8 @@ def 
test_output_data_to_scp(self): eventgen = EventGenerator(args=pargs) with patch('requests_futures.sessions.FuturesSession.post') as mock_requests: sample = MagicMock() - scpoutput = SCPOutputPlugin(sample) + scsoutput = SCSOutputPlugin(sample) eventgen.start() - scpoutput.session.post.assert_called() - assert scpoutput.session.post.call_count == 1 + scsoutput.session.post.assert_called() + assert scsoutput.session.post.call_count == 1 diff --git a/tests/sample_eventgen_conf/medium_test/eventgen.conf.scpoutput b/tests/sample_eventgen_conf/medium_test/eventgen.conf.scpoutput deleted file mode 100644 index 6a5d08a1..00000000 --- a/tests/sample_eventgen_conf/medium_test/eventgen.conf.scpoutput +++ /dev/null @@ -1,14 +0,0 @@ -[windbag] -generator = windbag -earliest = -3s -latest = now -interval = 3 -count = 5 -end = 1 -outputMode = scpout -host = eventgen_scp_plugin -source = scp_plugin_test -sourcetype = scp_plugin_test_type - -scpEndPoint = http://127.0.0.1 -scpAccessToken = testToken diff --git a/tests/sample_eventgen_conf/medium_test/eventgen.conf.scsoutput b/tests/sample_eventgen_conf/medium_test/eventgen.conf.scsoutput new file mode 100644 index 00000000..79dda19e --- /dev/null +++ b/tests/sample_eventgen_conf/medium_test/eventgen.conf.scsoutput @@ -0,0 +1,14 @@ +[windbag] +generator = windbag +earliest = -3s +latest = now +interval = 3 +count = 5 +end = 1 +outputMode = scsout +host = eventgen_scs_plugin +source = scs_plugin_test +sourcetype = scs_plugin_test_type + +scsEndPoint = http://127.0.0.1 +scsAccessToken = testToken diff --git a/tests/sample_eventgen_conf/scpout/eventgen.conf b/tests/sample_eventgen_conf/scsout/eventgen.conf similarity index 59% rename from tests/sample_eventgen_conf/scpout/eventgen.conf rename to tests/sample_eventgen_conf/scsout/eventgen.conf index 44e069f7..d16ec6d5 100644 --- a/tests/sample_eventgen_conf/scpout/eventgen.conf +++ b/tests/sample_eventgen_conf/scsout/eventgen.conf @@ -3,13 +3,13 @@ sampleDir = . 
interval = 1 mode = replay end = 1 -outputMode = scpout -host = eventgen_scp_plugin -source = scp_plugin_test -sourcetype = scp_plugin_test_type +outputMode = scsout +host = eventgen_scs_plugin +source = scs_plugin_test +sourcetype = scs_plugin_test_type -scpEndPoint = -scpAccessToken = +scsEndPoint = +scsAccessToken = token.0.token = \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} token.0.replacementType = replaytimestamp diff --git a/tests/sample_eventgen_conf/scpout/splunk_cloud_platform_events.txt b/tests/sample_eventgen_conf/scsout/splunk_cloud_platform_events.txt similarity index 100% rename from tests/sample_eventgen_conf/scpout/splunk_cloud_platform_events.txt rename to tests/sample_eventgen_conf/scsout/splunk_cloud_platform_events.txt From 9730801133aa1dd15c5f8c73d359e3c68a249d9d Mon Sep 17 00:00:00 2001 From: Tony Lee Date: Wed, 20 Nov 2019 10:07:21 -0800 Subject: [PATCH 42/53] Change (#350) --- splunk_eventgen/lib/plugins/output/scsout.py | 46 ++++++++++++-------- 1 file changed, 29 insertions(+), 17 deletions(-) diff --git a/splunk_eventgen/lib/plugins/output/scsout.py b/splunk_eventgen/lib/plugins/output/scsout.py index 8adaa853..6a66c367 100644 --- a/splunk_eventgen/lib/plugins/output/scsout.py +++ b/splunk_eventgen/lib/plugins/output/scsout.py @@ -40,18 +40,42 @@ def __init__(self, sample, output_counter=None): self.scsClientSecret = getattr(self._sample, 'scsClientSecret', '') self.scsRetryNum = int(getattr(self._sample, 'scsRetryNum', 0)) # By default, retry num is 0 + self._setup_REST_workers() + + def _setup_REST_workers(self, session=None, workers=10): + # disable any "requests" warnings + requests.packages.urllib3.disable_warnings() + # Bind passed in samples to the outputter. 
+ if not session: + session = Session() + self.session = FuturesSession(session=session, executor=ThreadPoolExecutor(max_workers=workers)) + self.active_sessions = [] + + def flush(self, events): if not self.scsEndPoint: - raise NoSCSEndPoint("Please specify your REST endpoint for the SCS tenant") + if getattr(self.config, 'scsEndPoint', None): + self.scsEndPoint = self.config.scsEndPoint + else: + raise NoSCSEndPoint("Please specify your REST endpoint for the SCS tenant") if not self.scsAccessToken: - raise NoSCSAccessToken("Please specify your REST endpoint access token for the SCS tenant") - + if getattr(self.config, 'scsAccessToken', None): + self.scsAccessToken = self.config.scsAccessToken + else: + raise NoSCSAccessToken("Please specify your REST endpoint access token for the SCS tenant") + if self.scsClientId and self.scsClientSecret: logger.info("Both scsClientId and scsClientSecret are supplied. We will renew the expired token using these credentials.") self.scsRenewToken = True else: - self.scsRenewToken = False - + if getattr(self.config, 'scsClientId', None) and getattr(self.config, 'scsClientSecret', None): + self.scsClientId = self.config.scsClientId + self.scsClientSecret = self.config.scsClientSecret + logger.info("Both scsClientId and scsClientSecret are supplied. We will renew the expired token using these credentials.") + self.scsRenewToken = True + else: + self.scsRenewToken = False + self.header = { "Authorization": f"Bearer {self.scsAccessToken}", "Content-Type": "application/json" @@ -65,18 +89,6 @@ def __init__(self, sample, output_counter=None): "grant_type": "client_credentials" } - self._setup_REST_workers() - - def _setup_REST_workers(self, session=None, workers=10): - # disable any "requests" warnings - requests.packages.urllib3.disable_warnings() - # Bind passed in samples to the outputter. 
- if not session: - session = Session() - self.session = FuturesSession(session=session, executor=ThreadPoolExecutor(max_workers=workers)) - self.active_sessions = [] - - def flush(self, events): for i in range(self.scsRetryNum + 1): logger.debug(f"Sending data to the scs endpoint. Num:{i}") self._sendHTTPEvents(events) From 775b1a8c76b153243b3c7f3d589c0799f068c2b7 Mon Sep 17 00:00:00 2001 From: Jack Meixensperger Date: Tue, 3 Dec 2019 15:54:14 -0800 Subject: [PATCH 43/53] change token type (#351) --- splunk_eventgen/README/eventgen.conf.tutorial1 | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/splunk_eventgen/README/eventgen.conf.tutorial1 b/splunk_eventgen/README/eventgen.conf.tutorial1 index 282d978e..4ea72bcf 100644 --- a/splunk_eventgen/README/eventgen.conf.tutorial1 +++ b/splunk_eventgen/README/eventgen.conf.tutorial1 @@ -5,21 +5,21 @@ timeMultiple = 2 outputMode = stdout token.0.token = \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3,6} -token.0.replacementType = timestamp +token.0.replacementType = replaytimestamp token.0.replacement = %Y-%m-%d %H:%M:%S,%f token.1.token = \d{2}-\d{2}-\d{4} \d{2}:\d{2}:\d{2}.\d{3,6} -token.1.replacementType = timestamp +token.1.replacementType = replaytimestamp token.1.replacement = %m-%d-%Y %H:%M:%S.%f token.2.token = \d{2}/\w{3}/\d{4}:\d{2}:\d{2}:\d{2}.\d{3,6} -token.2.replacementType = timestamp +token.2.replacementType = replaytimestamp token.2.replacement = %d/%b/%Y:%H:%M:%S.%f token.3.token = \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} -token.3.replacementType = timestamp +token.3.replacementType = replaytimestamp token.3.replacement = %Y-%m-%d %H:%M:%S token.4.token = \d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2} -token.4.replacementType = timestamp +token.4.replacementType = replaytimestamp token.4.replacement = %Y-%m-%dT%H:%M:%S From 20603c19cfb35d4e014328810253ec6d8e87f349 Mon Sep 17 00:00:00 2001 From: Li Wu Date: Tue, 17 Dec 2019 10:21:25 +0800 Subject: [PATCH 44/53] Add multithread support (#353) * Add 
multithread support * Fix typo --- splunk_eventgen/__main__.py | 3 +++ .../eventgen_api_server/eventgen_core_object.py | 8 ++++---- splunk_eventgen/eventgen_api_server/eventgen_server.py | 6 +++--- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/splunk_eventgen/__main__.py b/splunk_eventgen/__main__.py index ce66a6d0..9a43bc41 100644 --- a/splunk_eventgen/__main__.py +++ b/splunk_eventgen/__main__.py @@ -76,6 +76,7 @@ def parse_args(): service_subparser.add_argument("--redis-host", type=str, default='127.0.0.1', help="Redis Host") service_subparser.add_argument("--redis-port", type=str, default='6379', help="Redis Port") service_subparser.add_argument("--web-server-port", type=str, default='9500', help="Port you want to run a web server on") + service_subparser.add_argument("--multithread", action="store_true", help="Use multi-thread instead of multi-process") # Help subparser # NOTE: Keep this at the end so we can use the subparser_dict.keys() to display valid commands help_subparser = subparsers.add_parser('help', help="Display usage on a subcommand") @@ -202,6 +203,8 @@ def gather_env_vars(args): env_vars["REDIS_HOST"] = os.environ.get("REDIS_HOST", args.redis_host) env_vars["REDIS_PORT"] = os.environ.get("REDIS_PORT", args.redis_port) env_vars["WEB_SERVER_PORT"] = os.environ.get("WEB_SERVER_PORT", args.web_server_port) + if "multithread" in args: + env_vars["multithread"] = args.multithread return env_vars diff --git a/splunk_eventgen/eventgen_api_server/eventgen_core_object.py b/splunk_eventgen/eventgen_api_server/eventgen_core_object.py index 49762c36..9b0eb6a1 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_core_object.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_core_object.py @@ -9,9 +9,9 @@ class EventgenCoreObject: - def __init__(self): + def __init__(self, **kargs): self.logger = logging.getLogger('eventgen_server') - self.eventgen_core_object = eventgen_core.EventGenerator(self._create_args()) + 
self.eventgen_core_object = eventgen_core.EventGenerator(self._create_args(**kargs)) self.configured = False self.configfile = None self.check_and_configure_eventgen() @@ -30,7 +30,7 @@ def refresh_eventgen_core_object(self): self.check_and_configure_eventgen() self.logger.info("Refreshed the eventgen core object") - def _create_args(self): + def _create_args(self, **kargs): args = argparse.Namespace() args.daemon = False args.version = False @@ -43,7 +43,7 @@ def _create_args(self): args.interval = None args.keepoutput = False args.modinput = False - args.multiprocess = True + args.multiprocess = False if kargs.get("multithread") else True args.outputters = None args.profiler = False args.sample = None diff --git a/splunk_eventgen/eventgen_api_server/eventgen_server.py b/splunk_eventgen/eventgen_api_server/eventgen_server.py index f95493c8..7146694a 100644 --- a/splunk_eventgen/eventgen_api_server/eventgen_server.py +++ b/splunk_eventgen/eventgen_api_server/eventgen_server.py @@ -8,10 +8,10 @@ class EventgenServer: - def __init__(self, *args, **kwargs): - self.eventgen = eventgen_core_object.EventgenCoreObject() - self.mode = kwargs.get('mode', 'standalone') + def __init__(self, *args, **kwargs): self.env_vars = kwargs.get('env_vars') + self.eventgen = eventgen_core_object.EventgenCoreObject(mutithread=self.env_vars.get("multithread", False)) + self.mode = kwargs.get('mode', 'standalone') self.host = socket.gethostname() self.role = 'server' From 38a201a8d81bd5cf66badc461d93c45a90edfe02 Mon Sep 17 00:00:00 2001 From: Li Wu Date: Fri, 20 Dec 2019 06:29:22 +0800 Subject: [PATCH 45/53] Revert coverage version to 4.5.4 (#355) --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index fd0bfe6a..021af017 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,6 +2,7 @@ pytest==4.6.4 pytest-xdist mock pytest-cov +coverage==4.5.4 docker==3.7.3 pyOpenSSL lxml==4.3.4 From f33c07227182603d532db065a917a8faba1f01c9 Mon 
Sep 17 00:00:00 2001 From: Li Wu Date: Thu, 20 Feb 2020 10:09:25 +0800 Subject: [PATCH 46/53] Fix CI failure cased by jinja new version and update the log config to reduce log in app (#358) --- docs/REFERENCE.md | 2 +- requirements.txt | 2 +- splunk_eventgen/lib/logging_config/__init__.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/REFERENCE.md b/docs/REFERENCE.md index 4bdcda6e..1a9e62a8 100644 --- a/docs/REFERENCE.md +++ b/docs/REFERENCE.md @@ -129,7 +129,7 @@ outputMode = scsout | modinput | s2s | file | splunkstream | stdout | devnull | * If setting splunkstream, should set splunkHost, splunkPort, splunkMethod, splunkUser and splunkPassword if not Splunk embedded * If setting s2s, should set splunkHost and splunkPort - * If setting syslogout, should set syslogDestinationHost and syslogDestinationPort + * If setting syslogout, should set syslogDestinationHost and syslogDestinationPort. A UDP port listening on Splunk needs to be configured. https://docs.splunk.com/Documentation/Splunk/latest/Data/HowSplunkEnterprisehandlessyslogdata * If setting httpevent, should set httpeventServers * If setting metric_httpevent, should set httpeventServers and make sure your index is a splunk metric index diff --git a/requirements.txt b/requirements.txt index 021af017..46c88479 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,7 +13,7 @@ requests[security] ujson>=1.35 pyyaml httplib2 -jinja2 +jinja2==2.10.3 urllib3==1.24.2 pyOpenSSL flake8>=3.7.7 diff --git a/splunk_eventgen/lib/logging_config/__init__.py b/splunk_eventgen/lib/logging_config/__init__.py index 5a0e1c29..d602589b 100644 --- a/splunk_eventgen/lib/logging_config/__init__.py +++ b/splunk_eventgen/lib/logging_config/__init__.py @@ -2,7 +2,7 @@ import logging.config LOG_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', 'logs') -DEFAULT_LOGGING_LEVEL = "DEBUG" +DEFAULT_LOGGING_LEVEL = "ERROR" LOGGING_CONFIG = { 'version': 1, @@ -76,7 +76,7 @@ 'loggers': 
{ 'eventgen': { - 'handlers': ['console', 'eventgen_main'], + 'handlers': ['eventgen_main'], 'level': DEFAULT_LOGGING_LEVEL, 'propagate': False }, From 6cf054b1e40a1e67b190ecc4763dacd43e6ac0d8 Mon Sep 17 00:00:00 2001 From: Erwin Vrolijk Date: Thu, 27 Feb 2020 02:53:13 +0100 Subject: [PATCH 47/53] Add missing documentation for negative floats, as introduced in PR#127 (#360) --- docs/CONFIGURE.md | 3 +-- docs/REFERENCE.md | 3 +-- splunk_eventgen/splunk_app/README/eventgen.conf.spec | 3 +-- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/docs/CONFIGURE.md b/docs/CONFIGURE.md index 449d1a8e..ea514871 100644 --- a/docs/CONFIGURE.md +++ b/docs/CONFIGURE.md @@ -323,8 +323,7 @@ Tokens in the default generator can override the sample to allow dynamic content and is a number greater than 0 and greater than or equal to . If rated, will be multiplied times hourOfDayRate and dayOfWeekRate. * For float[:], the token will be replaced with a random float between - start and end values where is a number greater than 0 - and is a number greater than 0 and greater than or equal to . + start and end values where is a number greater than or equal to . For floating point numbers, precision will be based off the precision specified in . For example, if we specify 1.0, precision will be one digit, if we specify 1.0000, precision will be four digits. If rated, will be multiplied times hourOfDayRate and dayOfWeekRate. diff --git a/docs/REFERENCE.md b/docs/REFERENCE.md index 1a9e62a8..f226cea4 100644 --- a/docs/REFERENCE.md +++ b/docs/REFERENCE.md @@ -564,8 +564,7 @@ token..replacement = | | ["list","of","strptime"] | guid and is a number greater than 0 and greater than or equal to . If rated, will be multiplied times hourOfDayRate and dayOfWeekRate. * For float[:], the token will be replaced with a random float between - start and end values where is a number greater than 0 - and is a number greater than 0 and greater than or equal to . 
+ start and end values where is a number greater than or equal to . For floating point numbers, precision will be based off the precision specified in . For example, if we specify 1.0, precision will be one digit, if we specify 1.0000, precision will be four digits. If rated, diff --git a/splunk_eventgen/splunk_app/README/eventgen.conf.spec b/splunk_eventgen/splunk_app/README/eventgen.conf.spec index c713f2f5..ac23eef6 100644 --- a/splunk_eventgen/splunk_app/README/eventgen.conf.spec +++ b/splunk_eventgen/splunk_app/README/eventgen.conf.spec @@ -467,8 +467,7 @@ token..replacement = | | ["list","of","strptime"] | guid and is a number greater than 0 and greater than or equal to . If rated, will be multiplied times hourOfDayRate and dayOfWeekRate. * For float[:], the token will be replaced with a random float between - start and end values where is a number greater than 0 - and is a number greater than 0 and greater than or equal to . + start and end values where is a number greater than or equal to . For floating point numbers, precision will be based off the precision specified in . For example, if we specify 1.0, precision will be one digit, if we specify 1.0000, precision will be four digits. If rated, will be multiplied times hourOfDayRate and dayOfWeekRate. From 305552db50a1d0a9bed18bb417e6af1d78ca813e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 27 Feb 2020 10:22:53 +0800 Subject: [PATCH 48/53] Bump nokogiri from 1.10.4 to 1.10.8 in /docs (#359) Bumps [nokogiri](https://github.com/sparklemotion/nokogiri) from 1.10.4 to 1.10.8. 
- [Release notes](https://github.com/sparklemotion/nokogiri/releases) - [Changelog](https://github.com/sparklemotion/nokogiri/blob/master/CHANGELOG.md) - [Commits](https://github.com/sparklemotion/nokogiri/compare/v1.10.4...v1.10.8) Signed-off-by: dependabot[bot] Co-authored-by: Li Wu --- docs/Gemfile.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/Gemfile.lock b/docs/Gemfile.lock index d98655a0..df772d69 100644 --- a/docs/Gemfile.lock +++ b/docs/Gemfile.lock @@ -206,7 +206,7 @@ GEM jekyll-seo-tag (~> 2.1) minitest (5.12.2) multipart-post (2.1.1) - nokogiri (1.10.4) + nokogiri (1.10.8) mini_portile2 (~> 2.4.0) octokit (4.14.0) sawyer (~> 0.8.0, >= 0.5.3) From addcc9fdc2ab54c45e0e8739793744d067473f4b Mon Sep 17 00:00:00 2001 From: Li Wu Date: Mon, 2 Mar 2020 09:50:12 +0800 Subject: [PATCH 49/53] Fix jinja plugin bug and test addon sample (#361) --- splunk_eventgen/lib/requirements.txt | 4 +++- tests/sample_jinja_addon.zip | Bin 0 -> 1769 bytes 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 tests/sample_jinja_addon.zip diff --git a/splunk_eventgen/lib/requirements.txt b/splunk_eventgen/lib/requirements.txt index 4bfb4b0b..51638740 100644 --- a/splunk_eventgen/lib/requirements.txt +++ b/splunk_eventgen/lib/requirements.txt @@ -1,2 +1,4 @@ ujson==1.35 -jinja2==2.10.1 +jinja2==2.10.3 +requests-futures==1.0.0 +urllib3==1.24.2 diff --git a/tests/sample_jinja_addon.zip b/tests/sample_jinja_addon.zip new file mode 100644 index 0000000000000000000000000000000000000000..2b958e6a19238360428d0c8cc7b829d5c21ed24f GIT binary patch literal 1769 zcmWIWW@h1H00I8khyXAHN(eIuFcc@|7UZPHXJzJPCB`SFq~z!6heq%)Ty%|yTk9Ge z_mxqEApmZ`CzBWy1EjGVkdm5~SejD;H%J3$7|b9M1`Z%ZH%J4&L8)b_c_rzod3wqD zd1)Z$X#!1znQGeO$aTnp$Mw6V>&v*69nuH6ca*6cE%~JxyiD@slG4Dc@^rSlEr0g> zFWRp-b9a^GPsa)2#hH9xdNxPgW#gElo8_0R894V`1p5qQpZ5paeAfP1S@3`Vzg^s` zpW7)sy>_cD$!g|uxm)x6W48-NG@q|}wmW&zqH78to#O+%**W5c{uovP!*c-;BLYgH 
z1vR8`1QaCji{Sxf-Gv_F=mx1`4=9*HC8@yJOe{&oZX_h^(Nlp1u|@)oErBEyJ(xY9 zxHxw~sAo2<>KJL1Q(M_Y66i&raxe)(j)tGSsl zFO+X7b8meFQ*_rvZAr#+8=jrIl(ePi;*M1dcKuhG6CGgV^yk{|JpCE(yU+6r{`Ak(lIhA0dmhJvy!#85<0fSidmkPK~T?80v-TEV~pFBp)Wjh=^4ot*`YbS%z> z<*3jIh_gXi6Q7}~NX~xixEa5puXPv_LYxp^=RVubUCZW6iQ0@GOh$ zA Date: Mon, 6 Apr 2020 01:50:54 -0700 Subject: [PATCH 50/53] regex stanza bugfix (#365) * prelim changes * fix regex length matching * fix wildcard sample + csv cases * small csv fix, add test case * revert accidental change * Upgraded test instance * add httpevent collector * forgot to update stanza name * add escape for csv matching + test case * add sample, remove stanza * push not working Co-authored-by: tonyl --- Makefile | 5 +++- splunk_eventgen/lib/eventgenconfig.py | 28 ++++++++++++------- .../large/conf/eventgen_sample_regex_csv.conf | 15 ++++++++++ .../conf/eventgen_sample_regex_integer.conf | 15 ++++++++++ .../conf/eventgen_sample_regex_wildcard.conf | 15 ++++++++++ tests/large/provision/Dockerfile | 11 +++----- .../provision/add_httpevent_collector.sh | 5 ++++ tests/large/provision/docker-compose.yml | 4 ++- tests/large/sample/sample1 | 12 ++++++++ tests/large/sample/sample2 | 12 ++++++++ tests/large/sample/timeorderXcsv | 11 ++++++++ tests/large/sample/timeorder_regex.csv | 11 ++++++++ .../large/sample/{tutorial1 => tutorial1.csv} | 0 tests/large/test_mode_replay.py | 2 +- tests/large/test_mode_sample.py | 18 ++++++++++++ 15 files changed, 144 insertions(+), 20 deletions(-) create mode 100755 tests/large/conf/eventgen_sample_regex_csv.conf create mode 100644 tests/large/conf/eventgen_sample_regex_integer.conf create mode 100644 tests/large/conf/eventgen_sample_regex_wildcard.conf create mode 100755 tests/large/provision/add_httpevent_collector.sh create mode 100755 tests/large/sample/sample1 create mode 100755 tests/large/sample/sample2 create mode 100644 tests/large/sample/timeorderXcsv create mode 100644 tests/large/sample/timeorder_regex.csv rename 
tests/large/sample/{tutorial1 => tutorial1.csv} (100%) diff --git a/Makefile b/Makefile index 5a65cb0a..b4c1914c 100644 --- a/Makefile +++ b/Makefile @@ -51,12 +51,15 @@ test_helper: @echo 'Installing docker-compose' bash install_docker_compose.sh + @echo 'Build a docker image' + docker build -t provision_splunk:latest -f tests/large/provision/Dockerfile tests/large/provision + @echo 'Start container with splunk' docker-compose -f tests/large/provision/docker-compose.yml up & sleep 120 @echo 'Provision splunk container' - docker-compose -f tests/large/provision/docker-compose.yml exec -T splunk sh -c 'cd /opt/splunk;./provision.sh;/opt/splunk/bin/splunk enable listen 9997 -auth admin:changeme;/opt/splunk/bin/splunk add index test_0;/opt/splunk/bin/splunk add index test_1;/opt/splunk/bin/splunk restart' + docker exec --user splunk provision_splunk_1 sh -c 'cd /opt/splunk;./provision.sh;./add_httpevent_collector.sh;/opt/splunk/bin/splunk enable listen 9997 -auth admin:changeme;/opt/splunk/bin/splunk add index test_0;/opt/splunk/bin/splunk add index test_1;/opt/splunk/bin/splunk restart' run_tests: @echo 'Running the super awesome tests' diff --git a/splunk_eventgen/lib/eventgenconfig.py b/splunk_eventgen/lib/eventgenconfig.py index 3acca7c5..d9078f9a 100644 --- a/splunk_eventgen/lib/eventgenconfig.py +++ b/splunk_eventgen/lib/eventgenconfig.py @@ -1,3 +1,4 @@ +import copy import datetime import json import logging.handlers @@ -520,15 +521,22 @@ def parse(self): if os.path.exists(s.sampleDir): sampleFiles = os.listdir(s.sampleDir) for sample in sampleFiles: - results = re.match(s.name, sample) + sample_name = s.name + # If we expect a .csv, append it to the file name - regex matching must include the extension + if s.sampletype == "csv" and not s.name.endswith(".csv"): + sample_name = s.name + "\.csv" + results = re.match(sample_name, sample) if results: - logger.debug("Matched file {0} with sample name {1}".format(results.group(0), s.name)) - samplePath = 
os.path.join(s.sampleDir, sample) - if os.path.isfile(samplePath): - logger.debug( - "Found sample file '%s' for app '%s' using config '%s' with priority '%s'" % - (sample, s.app, s.name, s._priority) + "; adding to list") - foundFiles.append(samplePath) + # Make sure the stanza name/regex matches the entire file name + match_start, match_end = results.regs[0] + if match_end - match_start == len(sample): + logger.debug("Matched file {0} with sample name {1}".format(results.group(0), s.name)) + samplePath = os.path.join(s.sampleDir, sample) + if os.path.isfile(samplePath): + logger.debug( + "Found sample file '%s' for app '%s' using config '%s' with priority '%s'" % + (sample, s.app, s.name, s._priority) + "; adding to list") + foundFiles.append(samplePath) # If we didn't find any files, log about it if len(foundFiles) == 0: @@ -539,8 +547,8 @@ def parse(self): tempsamples2.append(s) for f in foundFiles: - if s.name in f: - news = s + if re.search(s.name, f): + news = copy.copy(s) news.filePath = f # 12/3/13 CS TODO These are hard coded but should be handled via the modular config system # Maybe a generic callback for all plugins which will modify sample based on the filename diff --git a/tests/large/conf/eventgen_sample_regex_csv.conf b/tests/large/conf/eventgen_sample_regex_csv.conf new file mode 100755 index 00000000..2544c2bb --- /dev/null +++ b/tests/large/conf/eventgen_sample_regex_csv.conf @@ -0,0 +1,15 @@ +[timeorder.*] +sampleDir = ../sample +mode = sample +sampletype = csv +outputMode = stdout +end = 1 + +token.0.token = \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} +token.0.replacementType = timestamp +token.0.replacement = %Y-%m-%d %H:%M:%S + +token.1.token = @@integer +token.1.replacementType = random +token.1.replacement = integer[0:10] + diff --git a/tests/large/conf/eventgen_sample_regex_integer.conf b/tests/large/conf/eventgen_sample_regex_integer.conf new file mode 100644 index 00000000..9af999b6 --- /dev/null +++ 
b/tests/large/conf/eventgen_sample_regex_integer.conf @@ -0,0 +1,15 @@ +[sample\d] +sampleDir = ../sample +mode = sample +earliest = -15s +sampletype = raw +outputMode = stdout +end = 1 + +token.0.token = \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} +token.0.replacementType = timestamp +token.0.replacement = %Y-%m-%d %H:%M:%S + +token.1.token = @@integer +token.1.replacementType = random +token.1.replacement = integer[0:10] diff --git a/tests/large/conf/eventgen_sample_regex_wildcard.conf b/tests/large/conf/eventgen_sample_regex_wildcard.conf new file mode 100644 index 00000000..b57676aa --- /dev/null +++ b/tests/large/conf/eventgen_sample_regex_wildcard.conf @@ -0,0 +1,15 @@ +[sample.*] +sampleDir = ../sample +mode = sample +earliest = -15s +sampletype = raw +outputMode = stdout +end = 1 + +token.0.token = \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} +token.0.replacementType = timestamp +token.0.replacement = %Y-%m-%d %H:%M:%S + +token.1.token = @@integer +token.1.replacementType = random +token.1.replacement = integer[0:10] diff --git a/tests/large/provision/Dockerfile b/tests/large/provision/Dockerfile index 3aa91ead..12ea2b09 100644 --- a/tests/large/provision/Dockerfile +++ b/tests/large/provision/Dockerfile @@ -1,11 +1,8 @@ -FROM splunk/splunk:7.0.3-monitor +FROM splunk/splunk:7.3-debian -# https://superuser.com/questions/1423486/issue-with-fetching-http-deb-debian-org-debian-dists-jessie-updates-inrelease -RUN printf "deb http://archive.debian.org/debian/ jessie main\ndeb-src http://archive.debian.org/debian/ jessie main\ndeb http://security.debian.org jessie/updates main\ndeb-src http://security.debian.org jessie/updates main" > /etc/apt/sources.list - -RUN apt-get update +RUN sudo apt-get update RUN echo "installing docker dependencies and development tools" && \ - apt-get --assume-yes install curl vim + sudo apt-get --assume-yes install curl vim -COPY provision.sh /opt/splunk/ +COPY ["provision.sh", "add_httpevent_collector.sh", "/opt/splunk/"] diff --git 
a/tests/large/provision/add_httpevent_collector.sh b/tests/large/provision/add_httpevent_collector.sh new file mode 100755 index 00000000..1f574e5f --- /dev/null +++ b/tests/large/provision/add_httpevent_collector.sh @@ -0,0 +1,5 @@ +HTTP_INPUTS_PATH=/opt/splunk/etc/apps/search/local/inputs.conf +echo "[http://test]" >> $HTTP_INPUTS_PATH +echo "disabled = 0" >> $HTTP_INPUTS_PATH +echo "token = 00000000-0000-0000-0000-000000000000" >> $HTTP_INPUTS_PATH +echo "indexes = main,test_0,test_1" >> $HTTP_INPUTS_PATH diff --git a/tests/large/provision/docker-compose.yml b/tests/large/provision/docker-compose.yml index f8165a8e..1762a7f6 100644 --- a/tests/large/provision/docker-compose.yml +++ b/tests/large/provision/docker-compose.yml @@ -2,7 +2,7 @@ version: "3.3" services: splunk: hostname: eventgensplunk - build: . + image: provision_splunk:latest ports: - 8000:8000 - 8089:8089 @@ -12,8 +12,10 @@ services: SPLUNK_START_ARGS: --answer-yes --no-prompt --accept-license # add `SHELL` env variable to make the `dircolors` happy SHELL: /bin/bash + SPLUNK_PASSWORD: changeme volumes: # the `docker` command in guest can talk to host docker daemon - "/var/run/docker.sock:/var/run/docker.sock" # to make terminal colorful tty: true + \ No newline at end of file diff --git a/tests/large/sample/sample1 b/tests/large/sample/sample1 new file mode 100755 index 00000000..b906dc98 --- /dev/null +++ b/tests/large/sample/sample1 @@ -0,0 +1,12 @@ +2014-01-04 20:00:00 WINDBAG Event 1 of 12 randint @@integer +2014-01-04 20:00:01 WINDBAG Event 2 of 12 randint @@integer +2014-01-04 20:00:02 WINDBAG Event 3 of 12 randint @@integer +2014-01-04 20:00:03 WINDBAG Event 4 of 12 randint @@integer +2014-01-04 20:00:03 WINDBAG Event 5 of 12 randint @@integer +2014-01-04 20:00:04 WINDBAG Event 6 of 12 randint @@integer +2014-01-04 20:00:05 WINDBAG Event 7 of 12 randint @@integer +2014-01-04 20:00:06 WINDBAG Event 8 of 12 randint @@integer +2014-01-04 20:00:08 WINDBAG Event 9 of 12 randint @@integer 
+2014-01-04 20:00:20 WINDBAG Event 10 of 12 randint @@integer +2014-01-04 20:00:21 WINDBAG Event 11 of 12 randint @@integer +2014-01-04 20:00:21 WINDBAG Event 12 of 12 randint @@integer diff --git a/tests/large/sample/sample2 b/tests/large/sample/sample2 new file mode 100755 index 00000000..b906dc98 --- /dev/null +++ b/tests/large/sample/sample2 @@ -0,0 +1,12 @@ +2014-01-04 20:00:00 WINDBAG Event 1 of 12 randint @@integer +2014-01-04 20:00:01 WINDBAG Event 2 of 12 randint @@integer +2014-01-04 20:00:02 WINDBAG Event 3 of 12 randint @@integer +2014-01-04 20:00:03 WINDBAG Event 4 of 12 randint @@integer +2014-01-04 20:00:03 WINDBAG Event 5 of 12 randint @@integer +2014-01-04 20:00:04 WINDBAG Event 6 of 12 randint @@integer +2014-01-04 20:00:05 WINDBAG Event 7 of 12 randint @@integer +2014-01-04 20:00:06 WINDBAG Event 8 of 12 randint @@integer +2014-01-04 20:00:08 WINDBAG Event 9 of 12 randint @@integer +2014-01-04 20:00:20 WINDBAG Event 10 of 12 randint @@integer +2014-01-04 20:00:21 WINDBAG Event 11 of 12 randint @@integer +2014-01-04 20:00:21 WINDBAG Event 12 of 12 randint @@integer diff --git a/tests/large/sample/timeorderXcsv b/tests/large/sample/timeorderXcsv new file mode 100644 index 00000000..72e2fc31 --- /dev/null +++ b/tests/large/sample/timeorderXcsv @@ -0,0 +1,11 @@ +_time,_raw,index,host,source,sourcetype +2015-08-18T16:28:54.695-0700,"127.0.0.1 - admin [18/Aug/2015:16:28:54.695 -0700] ""GET /en-US/api/shelper?snippet=true&snippetEmbedJS=false&namespace=search&search=search+index%3D_internal+%7C+fields+_time%2C+_raw%2C+index%2C+host%2C+source%2C+sourcetype+&useTypeahead=true&useAssistant=true&showCommandHelp=true&showCommandHistory=true&showFieldInfo=false&_=1439940537886 HTTP/1.1"" 200 994 ""https://host5.foobar.com:8000/en-US/app/search/search?q=search%20index%3D_internal%20%7C%20fields%20_time%2C%20_raw%2C%20index%2C%20host%2C%20source%2C%20sourcetype&sid=1439940529.1846224&earliest=&latest="" ""Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) 
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.132 Safari/537.36"" - 55d3bfb6b17f7ff8270d50 33ms",_internal,host5.foobar.com,/usr/local/bamboo/itsi-demo/local/splunk/var/log/splunk/web_access.log,splunk_web_access +2015-08-18T16:28:54.569-0700,"2015-08-18 16:28:54,569 INFO streams_utils:24 - utils::readAsJson:: /usr/local/bamboo/itsi-demo/local/splunk/etc/apps/splunk_app_stream/local/apps",_internal,host5.foobar.com,/usr/local/bamboo/itsi-demo/local/splunk/var/log/splunk/splunk_app_stream.log,splunk_app_stream.log +2015-08-18T16:28:54.568-0700,"2015-08-18 16:28:54,568 INFO streams_utils:74 - create dir /usr/local/bamboo/itsi-demo/local/splunk/etc/apps/splunk_app_stream/local/",_internal,host5.foobar.com,/usr/local/bamboo/itsi-demo/local/splunk/var/log/splunk/splunk_app_stream.log,splunk_app_stream.log +2015-08-18T16:28:54.564-0700,"127.0.0.1 - - [18/Aug/2015:16:28:54.564 -0700] ""GET /en-us/custom/splunk_app_stream/ping/ HTTP/1.1"" 200 311 """" """" - 55d3bfb6907f7ff805f710 5ms",_internal,host5.foobar.com,/usr/local/bamboo/itsi-demo/local/splunk/var/log/splunk/web_access.log,splunk_web_access +2015-08-18T16:28:52.798-0700,"10.160.255.115 - admin [18/Aug/2015:16:28:52.798 -0700] ""GET /en-US/splunkd/__raw/servicesNS/nobody/search/search/jobs/1439940529.1846224/summary?output_mode=json&min_freq=0&_=1439940537880 HTTP/1.1"" 200 503 ""https://host5.foobar.com:8000/en-US/app/search/search?q=search%20index%3D_internal%20%7C%20fields%20_time%2C%20_raw%2C%20index%2C%20host%2C%20source%2C%20sourcetype&sid=1439940529.1846224&earliest=&latest="" ""Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.132 Safari/537.36"" - 9f802569d5c3d77d468e897d34f8969f 6ms",_internal,host5.foobar.com,/usr/local/bamboo/itsi-demo/local/splunk/var/log/splunk/splunkd_ui_access.log,splunkd_ui_access +2015-08-18T16:28:52.798-0700,"10.160.255.115 - admin [18/Aug/2015:16:28:52.798 -0700] ""GET 
/en-US/splunkd/__raw/services/search/jobs/1439940529.1846224/timeline?offset=0&count=1000&_=1439940537881 HTTP/1.1"" 200 349 ""https://host5.foobar.com:8000/en-US/app/search/search?q=search%20index%3D_internal%20%7C%20fields%20_time%2C%20_raw%2C%20index%2C%20host%2C%20source%2C%20sourcetype&sid=1439940529.1846224&earliest=&latest="" ""Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.132 Safari/537.36"" - 9f802569d5c3d77d468e897d34f8969f 4ms",_internal,host5.foobar.com,/usr/local/bamboo/itsi-demo/local/splunk/var/log/splunk/splunkd_ui_access.log,splunkd_ui_access +2015-08-18T16:28:52.754-0700,"10.160.255.115 - admin [18/Aug/2015:16:28:52.754 -0700] ""GET /en-US/splunkd/__raw/servicesNS/nobody/search/search/jobs/1439940529.1846224?output_mode=json&_=1439940537879 HTTP/1.1"" 200 1543 ""https://host5.foobar.com:8000/en-US/app/search/search?q=search%20index%3D_internal%20%7C%20fields%20_time%2C%20_raw%2C%20index%2C%20host%2C%20source%2C%20sourcetype&sid=1439940529.1846224&earliest=&latest="" ""Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.132 Safari/537.36"" - 9f802569d5c3d77d468e897d34f8969f 4ms",_internal,host5.foobar.com,/usr/local/bamboo/itsi-demo/local/splunk/var/log/splunk/splunkd_ui_access.log,splunkd_ui_access +2015-08-18T16:28:52.270-0700,"2015-08-18 16:28:52,270 ERROR pid=16324 tid=MainThread file=__init__.py:execute:957 | Execution failed: [HTTP 401] Client is not authenticated +2015-08-18T16:28:52.268-0700,"127.0.0.1 - - [18/Aug/2015:16:28:52.268 -0700] ""GET /services/shcluster/config/config HTTP/1.0"" 401 148 - - - 0ms",_internal,host5.foobar.com,/usr/local/bamboo/itsi-demo/local/splunk/var/log/splunk/splunkd_access.log,splunkd_access +2015-08-18T16:28:52.247-0700,"2015-08-18 16:28:52,247 INFO pid=16324 tid=MainThread file=__init__.py:execute:906 | Execute 
called",_internal,host5.foobar.com,/usr/local/bamboo/itsi-demo/local/splunk/var/log/splunk/python_modular_input.log,python_modular_input diff --git a/tests/large/sample/timeorder_regex.csv b/tests/large/sample/timeorder_regex.csv new file mode 100644 index 00000000..72e2fc31 --- /dev/null +++ b/tests/large/sample/timeorder_regex.csv @@ -0,0 +1,11 @@ +_time,_raw,index,host,source,sourcetype +2015-08-18T16:28:54.695-0700,"127.0.0.1 - admin [18/Aug/2015:16:28:54.695 -0700] ""GET /en-US/api/shelper?snippet=true&snippetEmbedJS=false&namespace=search&search=search+index%3D_internal+%7C+fields+_time%2C+_raw%2C+index%2C+host%2C+source%2C+sourcetype+&useTypeahead=true&useAssistant=true&showCommandHelp=true&showCommandHistory=true&showFieldInfo=false&_=1439940537886 HTTP/1.1"" 200 994 ""https://host5.foobar.com:8000/en-US/app/search/search?q=search%20index%3D_internal%20%7C%20fields%20_time%2C%20_raw%2C%20index%2C%20host%2C%20source%2C%20sourcetype&sid=1439940529.1846224&earliest=&latest="" ""Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.132 Safari/537.36"" - 55d3bfb6b17f7ff8270d50 33ms",_internal,host5.foobar.com,/usr/local/bamboo/itsi-demo/local/splunk/var/log/splunk/web_access.log,splunk_web_access +2015-08-18T16:28:54.569-0700,"2015-08-18 16:28:54,569 INFO streams_utils:24 - utils::readAsJson:: /usr/local/bamboo/itsi-demo/local/splunk/etc/apps/splunk_app_stream/local/apps",_internal,host5.foobar.com,/usr/local/bamboo/itsi-demo/local/splunk/var/log/splunk/splunk_app_stream.log,splunk_app_stream.log +2015-08-18T16:28:54.568-0700,"2015-08-18 16:28:54,568 INFO streams_utils:74 - create dir /usr/local/bamboo/itsi-demo/local/splunk/etc/apps/splunk_app_stream/local/",_internal,host5.foobar.com,/usr/local/bamboo/itsi-demo/local/splunk/var/log/splunk/splunk_app_stream.log,splunk_app_stream.log +2015-08-18T16:28:54.564-0700,"127.0.0.1 - - [18/Aug/2015:16:28:54.564 -0700] ""GET /en-us/custom/splunk_app_stream/ping/ 
HTTP/1.1"" 200 311 """" """" - 55d3bfb6907f7ff805f710 5ms",_internal,host5.foobar.com,/usr/local/bamboo/itsi-demo/local/splunk/var/log/splunk/web_access.log,splunk_web_access +2015-08-18T16:28:52.798-0700,"10.160.255.115 - admin [18/Aug/2015:16:28:52.798 -0700] ""GET /en-US/splunkd/__raw/servicesNS/nobody/search/search/jobs/1439940529.1846224/summary?output_mode=json&min_freq=0&_=1439940537880 HTTP/1.1"" 200 503 ""https://host5.foobar.com:8000/en-US/app/search/search?q=search%20index%3D_internal%20%7C%20fields%20_time%2C%20_raw%2C%20index%2C%20host%2C%20source%2C%20sourcetype&sid=1439940529.1846224&earliest=&latest="" ""Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.132 Safari/537.36"" - 9f802569d5c3d77d468e897d34f8969f 6ms",_internal,host5.foobar.com,/usr/local/bamboo/itsi-demo/local/splunk/var/log/splunk/splunkd_ui_access.log,splunkd_ui_access +2015-08-18T16:28:52.798-0700,"10.160.255.115 - admin [18/Aug/2015:16:28:52.798 -0700] ""GET /en-US/splunkd/__raw/services/search/jobs/1439940529.1846224/timeline?offset=0&count=1000&_=1439940537881 HTTP/1.1"" 200 349 ""https://host5.foobar.com:8000/en-US/app/search/search?q=search%20index%3D_internal%20%7C%20fields%20_time%2C%20_raw%2C%20index%2C%20host%2C%20source%2C%20sourcetype&sid=1439940529.1846224&earliest=&latest="" ""Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.132 Safari/537.36"" - 9f802569d5c3d77d468e897d34f8969f 4ms",_internal,host5.foobar.com,/usr/local/bamboo/itsi-demo/local/splunk/var/log/splunk/splunkd_ui_access.log,splunkd_ui_access +2015-08-18T16:28:52.754-0700,"10.160.255.115 - admin [18/Aug/2015:16:28:52.754 -0700] ""GET /en-US/splunkd/__raw/servicesNS/nobody/search/search/jobs/1439940529.1846224?output_mode=json&_=1439940537879 HTTP/1.1"" 200 1543 
""https://host5.foobar.com:8000/en-US/app/search/search?q=search%20index%3D_internal%20%7C%20fields%20_time%2C%20_raw%2C%20index%2C%20host%2C%20source%2C%20sourcetype&sid=1439940529.1846224&earliest=&latest="" ""Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.132 Safari/537.36"" - 9f802569d5c3d77d468e897d34f8969f 4ms",_internal,host5.foobar.com,/usr/local/bamboo/itsi-demo/local/splunk/var/log/splunk/splunkd_ui_access.log,splunkd_ui_access +2015-08-18T16:28:52.270-0700,"2015-08-18 16:28:52,270 ERROR pid=16324 tid=MainThread file=__init__.py:execute:957 | Execution failed: [HTTP 401] Client is not authenticated +2015-08-18T16:28:52.268-0700,"127.0.0.1 - - [18/Aug/2015:16:28:52.268 -0700] ""GET /services/shcluster/config/config HTTP/1.0"" 401 148 - - - 0ms",_internal,host5.foobar.com,/usr/local/bamboo/itsi-demo/local/splunk/var/log/splunk/splunkd_access.log,splunkd_access +2015-08-18T16:28:52.247-0700,"2015-08-18 16:28:52,247 INFO pid=16324 tid=MainThread file=__init__.py:execute:906 | Execute called",_internal,host5.foobar.com,/usr/local/bamboo/itsi-demo/local/splunk/var/log/splunk/python_modular_input.log,python_modular_input diff --git a/tests/large/sample/tutorial1 b/tests/large/sample/tutorial1.csv similarity index 100% rename from tests/large/sample/tutorial1 rename to tests/large/sample/tutorial1.csv diff --git a/tests/large/test_mode_replay.py b/tests/large/test_mode_replay.py index b0576852..441b1de2 100644 --- a/tests/large/test_mode_replay.py +++ b/tests/large/test_mode_replay.py @@ -52,7 +52,7 @@ def test_mode_replay_backfill_greater_interval(eventgen_test_helper): def test_mode_replay_tutorial1(eventgen_test_helper): - """Test the replay mode with csv for sample file sample.tutorial1. 
https://github.com/splunk/eventgen/issues/244""" + """Test the replay mode with csv for sample file sample.tutorial1.csv""" events = eventgen_test_helper('eventgen_tutorial1.conf').get_events() assert len(events) == 2019 diff --git a/tests/large/test_mode_sample.py b/tests/large/test_mode_sample.py index daea33ff..f86db600 100644 --- a/tests/large/test_mode_sample.py +++ b/tests/large/test_mode_sample.py @@ -105,3 +105,21 @@ def test_mode_sample_generator_workers(eventgen_test_helper): """Test sample mode with generatorWorkers = 5, end = 5 and count = 10""" events = eventgen_test_helper("eventgen_sample_generatorWorkers.conf").get_events() assert len(events) == 50 + + +def test_mode_sample_regex_integer(eventgen_test_helper): + """Test sample mode with a regex pattern in the stanza name ('sample\d')""" + events = eventgen_test_helper("eventgen_sample_regex_integer.conf").get_events() + assert len(events) == 24 + + +def test_mode_sample_regex_wildcard(eventgen_test_helper): + """Test sample mode with a regex wildcard pattern in the stanza name ('sample*')""" + events = eventgen_test_helper("eventgen_sample_regex_wildcard.conf").get_events() + assert len(events) == 36 + + +def test_mode_sample_regex_csv(eventgen_test_helper): + """Test sample mode with a regex pattern matching csv sample files in the stanza name ('timeorder.*')""" + events = eventgen_test_helper("eventgen_sample_regex_csv.conf").get_events() + assert len(events) == 20 From d175b10c671509f19c28d139a8e9c8233118e24a Mon Sep 17 00:00:00 2001 From: Guodong Wang Date: Mon, 6 Apr 2020 17:21:15 +0800 Subject: [PATCH 51/53] Improvement/release script (#368) * add PR to master * update title in sent PR Co-authored-by: Li Wu --- release_tool/prepare_release_branch.py | 30 +++++++++++++++----------- 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/release_tool/prepare_release_branch.py b/release_tool/prepare_release_branch.py index f2d9df77..d227bc61 100644 --- a/release_tool/prepare_release_branch.py +++ 
b/release_tool/prepare_release_branch.py @@ -24,7 +24,7 @@ def validate_version_str(version): valid = False try: v = int(n) - valid = (v>=0) + valid = (v >= 0) except: valid = False if not valid: @@ -38,17 +38,19 @@ def validate_token(token): return t parser = argparse.ArgumentParser( - 'prepare_release_branch.py', - description='eventgen release branch tool.\ncreate the release branch, set the right version and push the pull request.') + 'prepare_release_branch.py', description= + 'eventgen release branch tool.\ncreate the release branch, set the right version and push the pull request.') parser.add_argument('-v', '--verbose', default=False, action='store_true', help='enable the verbose logging') parser.add_argument('-n', '--version_str', type=validate_version_str, required=True) parser.add_argument('-a', '--token', help='your github access token.', default=None, type=validate_token) return parser.parse_args(sys.argv[1:]) + def setup_logging(verbose=None): l = logging.DEBUG if verbose is True else logging.INFO logging.getLogger().setLevel(l) + def setup_env(): ''' by default, we use this hard code current working dir. @@ -60,6 +62,7 @@ def setup_env(): logging.debug(f'try to change current working directory to {root_repo_dir}') os.chdir(root_repo_dir) + def run_sh_cmd(args, exit_on_error=None): should_exit_on_error = True if exit_on_error is None else exit_on_error child = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -75,10 +78,12 @@ def run_sh_cmd(args, exit_on_error=None): assert False, 'sh command fails.' 
return False + def get_release_branch_name(version_str): v = version_str.replace('.', '_') return f'release/{v}' + def replace_version(ver): ver_json_file = os.path.join(root_repo_dir, 'splunk_eventgen', 'version.json') with open(ver_json_file, 'w') as fp: @@ -121,15 +126,15 @@ def commit_updated_files(ver): run_sh_cmd(['git', 'commit', '-m', f'update eventgen version to {ver}'], False) logging.info('committed version files.') -def create_pr(ver, token): + +def create_pr(ver, token, target_branch): release_branch = get_release_branch_name(ver) response = requests.post( - 'https://api.github.com/repos/splunk/eventgen/pulls', - json={'title': f'Release eventgen {ver}', 'head': release_branch, 'base': 'develop', 'body': - 'As the title'}, headers={ - 'Accept': 'application/vnd.github.full+json', - 'Content-Type': 'application/json', - 'Authorization': f'token {token}'}) + 'https://api.github.com/repos/splunk/eventgen/pulls', json={ + 'title': f'Release eventgen {ver}. Merge to {target_branch} branch.', 'head': release_branch, 'base': + target_branch, 'body': 'As the title'}, headers={ + 'Accept': 'application/vnd.github.full+json', 'Content-Type': 'application/json', 'Authorization': + f'token {token}'}) response.raise_for_status() data = response.json() pr_url = data['url'] @@ -149,7 +154,7 @@ def create_pr(ver, token): logging.info('check out the release branch') release_branch = get_release_branch_name(arg_values.version_str) - branch_exist = run_sh_cmd(['git','show-ref','--verify',f'refs/heads/{release_branch}'], False) + branch_exist = run_sh_cmd(['git', 'show-ref', '--verify', f'refs/heads/{release_branch}'], False) if not branch_exist: run_sh_cmd(['git', 'checkout', '-b', release_branch]) else: @@ -164,7 +169,8 @@ def create_pr(ver, token): logging.info(f'release branch {release_branch} is pushed to remote repo.') if arg_values.token: - create_pr(arg_values.version_str, arg_values.token) + create_pr(arg_values.version_str, arg_values.token, 'develop') + 
create_pr(arg_values.version_str, arg_values.token, 'master') else: pr_url = 'https://github.com/splunk/eventgen/compare' logging.info('create pull reqeust manually by visiting this url:\n{pr_url}') From d96d1a4b455a3229208f89473fb5a2e44b7ec806 Mon Sep 17 00:00:00 2001 From: Ryan Faircloth <35384120+rfaircloth-splunk@users.noreply.github.com> Date: Mon, 6 Apr 2020 09:05:39 -0400 Subject: [PATCH 52/53] Correct python2/3 issue (#363) * msg must be a bytes like object * msg must be a bytes like object Co-authored-by: Li Wu --- splunk_eventgen/lib/plugins/output/tcpout.py | 3 ++- splunk_eventgen/lib/plugins/output/udpout.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/splunk_eventgen/lib/plugins/output/tcpout.py b/splunk_eventgen/lib/plugins/output/tcpout.py index 5843072d..488099b0 100644 --- a/splunk_eventgen/lib/plugins/output/tcpout.py +++ b/splunk_eventgen/lib/plugins/output/tcpout.py @@ -22,7 +22,8 @@ def flush(self, q): self.s.connect((self._tcpDestinationHost, int(self._tcpDestinationPort))) logger.info("Socket connected to {0}:{1}".format(self._tcpDestinationHost, self._tcpDestinationPort)) for x in q: - self.s.send(x['_raw'].rstrip() + '\n') + msg = x['_raw'].rstrip() + '\n' + self.s.send(str.encode(msg)) self.s.close() diff --git a/splunk_eventgen/lib/plugins/output/udpout.py b/splunk_eventgen/lib/plugins/output/udpout.py index 02699acd..8e3632e5 100644 --- a/splunk_eventgen/lib/plugins/output/udpout.py +++ b/splunk_eventgen/lib/plugins/output/udpout.py @@ -21,7 +21,7 @@ def __init__(self, sample, output_counter=None): def flush(self, q): for x in q: msg = x['_raw'].rstrip() + '\n' - self.s.sendto(msg, (self._udpDestinationHost, int(self._udpDestinationPort))) + self.s.sendto(str.encode(msg), (self._udpDestinationHost, int(self._udpDestinationPort))) logger.info("Flushing in udpout.") From 5a5eb178380fba55a228e50a3e269a1d756afbad Mon Sep 17 00:00:00 2001 From: Jack Meixensperger Date: Mon, 6 Apr 2020 16:18:58 -0700 Subject: [PATCH 
53/53] update eventgen version to 7.1.0 --- docs/CHANGELOG.md | 4 ++++ splunk_eventgen/splunk_app/default/app.conf | 2 +- splunk_eventgen/version.json | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 39bae45d..5c65c381 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -1,3 +1,7 @@ +**7.1.0**: + +- Check the release note and download the package/source from [Here](https://github.com/splunk/eventgen/releases/tag/7.1.0) + **7.0.0**: - Check the release note and download the package/source from [Here](https://github.com/splunk/eventgen/releases/tag/7.0.0) diff --git a/splunk_eventgen/splunk_app/default/app.conf b/splunk_eventgen/splunk_app/default/app.conf index 880a9a25..3bb576d3 100644 --- a/splunk_eventgen/splunk_app/default/app.conf +++ b/splunk_eventgen/splunk_app/default/app.conf @@ -14,7 +14,7 @@ build = 1 [launcher] author = Splunk Inc. -version = 6.3.2 +version = 7.1.0 description = SA-Eventgen app for dynamic data generation [package] diff --git a/splunk_eventgen/version.json b/splunk_eventgen/version.json index 342b591f..dde1c1e3 100644 --- a/splunk_eventgen/version.json +++ b/splunk_eventgen/version.json @@ -1 +1 @@ -{"version": "7.0.0"} +{"version": "7.1.0"} \ No newline at end of file